2 * Copyright (C) 2012 Fusion-io All rights reserved.
3 * Copyright (C) 2012 Intel Corp. All rights reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public
7 * License v2 as published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/wait.h>
21 #include <linux/bio.h>
22 #include <linux/slab.h>
23 #include <linux/buffer_head.h>
24 #include <linux/blkdev.h>
25 #include <linux/random.h>
26 #include <linux/iocontext.h>
27 #include <linux/capability.h>
28 #include <linux/ratelimit.h>
29 #include <linux/kthread.h>
30 #include <linux/raid/pq.h>
31 #include <linux/hash.h>
32 #include <linux/list_sort.h>
33 #include <linux/raid/xor.h>
35 #include <asm/div64.h>
37 #include "extent_map.h"
39 #include "transaction.h"
40 #include "print-tree.h"
43 #include "async-thread.h"
44 #include "check-integrity.h"
45 #include "rcu-string.h"
47 /* set when additional merges to this rbio are not allowed */
48 #define RBIO_RMW_LOCKED_BIT 1
51 * set when this rbio is sitting in the hash, but it is just a cache
54 #define RBIO_CACHE_BIT 2
57 * set when it is safe to trust the stripe_pages for caching
59 #define RBIO_CACHE_READY_BIT 3
61 #define RBIO_CACHE_SIZE 1024
65 BTRFS_RBIO_READ_REBUILD,
66 BTRFS_RBIO_PARITY_SCRUB,
67 BTRFS_RBIO_REBUILD_MISSING,
70 struct btrfs_raid_bio {
71 struct btrfs_fs_info *fs_info;
72 struct btrfs_bio *bbio;
74 /* while we're doing rmw on a stripe
75 * we put it into a hash table so we can
76 * lock the stripe and merge more rbios
79 struct list_head hash_list;
82 * LRU list for the stripe cache
84 struct list_head stripe_cache;
87 * for scheduling work in the helper threads
89 struct btrfs_work work;
92 * bio list and bio_list_lock are used
93 * to add more bios into the stripe
94 * in hopes of avoiding the full rmw
96 struct bio_list bio_list;
97 spinlock_t bio_list_lock;
99 /* also protected by the bio_list_lock, the
100 * plug list is used by the plugging code
101 * to collect partial bios while plugged. The
102 * stripe locking code also uses it to hand off
103 * the stripe lock to the next pending IO
105 struct list_head plug_list;
108 * flags that tell us if it is safe to
109 * merge with this bio
113 /* size of each individual stripe on disk */
116 /* number of data stripes (no p/q) */
123 * set if we're doing a parity rebuild
124 * for a read from higher up, which is handled
125 * differently from a parity rebuild as part of rmw
128 enum btrfs_rbio_ops operation;
130 /* first bad stripe */
133 /* second bad stripe (for raid6 use) */
138 * number of pages needed to represent the full stripe
144 * size of all the bios in the bio_list. This
145 * helps us decide if the rbio maps to a full stripe or not
154 atomic_t stripes_pending;
158 * these are two arrays of pointers. We allocate the
159 * rbio big enough to hold them both and setup their
160 * locations when the rbio is allocated
163 /* pointers to pages that we allocated for
164 * reading/writing stripes directly from the disk (including P/Q)
166 struct page **stripe_pages;
169 * pointers to the pages in the bio_list. Stored
170 * here for faster lookup
172 struct page **bio_pages;
175 * bitmap to record which horizontal stripe has data
177 unsigned long *dbitmap;
180 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
181 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
182 static void rmw_work(struct btrfs_work *work);
183 static void read_rebuild_work(struct btrfs_work *work);
184 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
185 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
186 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
187 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
188 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
189 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
190 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
192 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
194 static void async_scrub_parity(struct btrfs_raid_bio *rbio);
197 * the stripe hash table is used for locking, and to collect
198 * bios in hopes of making a full stripe
200 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
202 struct btrfs_stripe_hash_table *table;
203 struct btrfs_stripe_hash_table *x;
204 struct btrfs_stripe_hash *cur;
205 struct btrfs_stripe_hash *h;
206 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
210 if (info->stripe_hash_table)
214 * The table is large, starting with order 4 and can go as high as
215 * order 7 in case lock debugging is turned on.
217 * Try harder to allocate and fallback to vmalloc to lower the chance
218 * of a failing mount.
220 table_size = sizeof(*table) + sizeof(*h) * num_entries;
221 table = kvzalloc(table_size, GFP_KERNEL);
225 spin_lock_init(&table->cache_lock);
226 INIT_LIST_HEAD(&table->stripe_cache);
230 for (i = 0; i < num_entries; i++) {
232 INIT_LIST_HEAD(&cur->hash_list);
233 spin_lock_init(&cur->lock);
234 init_waitqueue_head(&cur->wait);
237 x = cmpxchg(&info->stripe_hash_table, NULL, table);
244 * caching an rbio means to copy anything from the
245 * bio_pages array into the stripe_pages array. We
246 * use the page uptodate bit in the stripe cache array
247 * to indicate if it has valid data
249 * once the caching is done, we set the cache ready bit
252 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
259 ret = alloc_rbio_pages(rbio);
263 for (i = 0; i < rbio->nr_pages; i++) {
264 if (!rbio->bio_pages[i])
267 s = kmap(rbio->bio_pages[i]);
268 d = kmap(rbio->stripe_pages[i]);
270 memcpy(d, s, PAGE_SIZE);
272 kunmap(rbio->bio_pages[i]);
273 kunmap(rbio->stripe_pages[i]);
274 SetPageUptodate(rbio->stripe_pages[i]);
276 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
280 * we hash on the first logical address of the stripe
282 static int rbio_bucket(struct btrfs_raid_bio *rbio)
284 u64 num = rbio->bbio->raid_map[0];
287 * we shift down quite a bit. We're using byte
288 * addressing, and most of the lower bits are zeros.
289 * This tends to upset hash_64, and it consistently
290 * returns just one or two different values.
292 * shifting off the lower bits fixes things.
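/*
 * For illustration: full stripes within one chunk start nr_data * stripe_len
 * (with the usual 64KiB stripe_len, at least 64KiB) apart, so their logical
 * addresses differ only above roughly bit 16. Shifting by 16 before hashing
 * keeps that information while dropping the mostly-constant low bits that
 * would otherwise starve hash_64() of entropy.
 */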
294 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
298 * stealing an rbio means taking all the uptodate pages from the stripe
299 * array in the source rbio and putting them into the destination rbio
301 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
307 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
310 for (i = 0; i < dest->nr_pages; i++) {
311 s = src->stripe_pages[i];
312 if (!s || !PageUptodate(s)) {
316 d = dest->stripe_pages[i];
320 dest->stripe_pages[i] = s;
321 src->stripe_pages[i] = NULL;
326 * merging means we take the bio_list from the victim and
327 * splice it into the destination. The victim should
328 * be discarded afterwards.
330 * must be called with dest->bio_list_lock held
332 static void merge_rbio(struct btrfs_raid_bio *dest,
333 struct btrfs_raid_bio *victim)
335 bio_list_merge(&dest->bio_list, &victim->bio_list);
336 dest->bio_list_bytes += victim->bio_list_bytes;
337 /* Also inherit the bitmaps from @victim. */
338 bitmap_or(dest->dbitmap, victim->dbitmap, dest->dbitmap,
339 dest->stripe_npages);
340 dest->generic_bio_cnt += victim->generic_bio_cnt;
341 bio_list_init(&victim->bio_list);
345 * used to prune items that are in the cache. The caller
346 * must hold the hash table lock.
348 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
350 int bucket = rbio_bucket(rbio);
351 struct btrfs_stripe_hash_table *table;
352 struct btrfs_stripe_hash *h;
356 * check the bit again under the hash table lock.
358 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
361 table = rbio->fs_info->stripe_hash_table;
362 h = table->table + bucket;
364 /* hold the lock for the bucket because we may be
365 * removing it from the hash table
370 * hold the lock for the bio list because we need
371 * to make sure the bio list is empty
373 spin_lock(&rbio->bio_list_lock);
375 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
376 list_del_init(&rbio->stripe_cache);
377 table->cache_size -= 1;
380 /* if the bio list isn't empty, this rbio is
381 * still involved in an IO. We take it out
382 * of the cache list, and drop the ref that
383 * was held for the list.
385 * If the bio_list was empty, we also remove
386 * the rbio from the hash_table, and drop
387 * the corresponding ref
389 if (bio_list_empty(&rbio->bio_list)) {
390 if (!list_empty(&rbio->hash_list)) {
391 list_del_init(&rbio->hash_list);
392 refcount_dec(&rbio->refs);
393 BUG_ON(!list_empty(&rbio->plug_list));
398 spin_unlock(&rbio->bio_list_lock);
399 spin_unlock(&h->lock);
402 __free_raid_bio(rbio);
406 * prune a given rbio from the cache
408 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
410 struct btrfs_stripe_hash_table *table;
413 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
416 table = rbio->fs_info->stripe_hash_table;
418 spin_lock_irqsave(&table->cache_lock, flags);
419 __remove_rbio_from_cache(rbio);
420 spin_unlock_irqrestore(&table->cache_lock, flags);
424 * remove everything in the cache
426 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
428 struct btrfs_stripe_hash_table *table;
430 struct btrfs_raid_bio *rbio;
432 table = info->stripe_hash_table;
434 spin_lock_irqsave(&table->cache_lock, flags);
435 while (!list_empty(&table->stripe_cache)) {
436 rbio = list_entry(table->stripe_cache.next,
437 struct btrfs_raid_bio,
439 __remove_rbio_from_cache(rbio);
441 spin_unlock_irqrestore(&table->cache_lock, flags);
445 * remove all cached entries and free the hash table
448 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
450 if (!info->stripe_hash_table)
452 btrfs_clear_rbio_cache(info);
453 kvfree(info->stripe_hash_table);
454 info->stripe_hash_table = NULL;
458 * insert an rbio into the stripe cache. It
459 * must have already been prepared by calling cache_rbio_pages
462 * If this rbio was already cached, it gets
463 * moved to the front of the lru.
465 * If the size of the rbio cache is too big, we prune an item
468 static void cache_rbio(struct btrfs_raid_bio *rbio)
470 struct btrfs_stripe_hash_table *table;
473 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
476 table = rbio->fs_info->stripe_hash_table;
478 spin_lock_irqsave(&table->cache_lock, flags);
479 spin_lock(&rbio->bio_list_lock);
481 /* bump our ref if we were not in the list before */
482 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
483 refcount_inc(&rbio->refs);
485 if (!list_empty(&rbio->stripe_cache)) {
486 list_move(&rbio->stripe_cache, &table->stripe_cache);
488 list_add(&rbio->stripe_cache, &table->stripe_cache);
489 table->cache_size += 1;
492 spin_unlock(&rbio->bio_list_lock);
494 if (table->cache_size > RBIO_CACHE_SIZE) {
495 struct btrfs_raid_bio *found;
497 found = list_entry(table->stripe_cache.prev,
498 struct btrfs_raid_bio,
502 __remove_rbio_from_cache(found);
505 spin_unlock_irqrestore(&table->cache_lock, flags);
509 * helper function to run the xor_blocks api. It is only
510 * able to do MAX_XOR_BLOCKS at a time, so we need to loop through
513 static void run_xor(void **pages, int src_cnt, ssize_t len)
517 void *dest = pages[src_cnt];
520 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
521 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
523 src_cnt -= xor_src_cnt;
524 src_off += xor_src_cnt;
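/*
 * Note: xor_blocks() xors the sources into dest, so successive passes
 * accumulate. E.g. assuming MAX_XOR_BLOCKS == 4 and src_cnt == 6, the
 * first pass folds pages[0..3] into dest and the second folds pages[4..5]
 * into the same dest. Callers pre-seed dest (see the memcpy right before
 * run_xor() in finish_rmw()).
 */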
529 * returns true if the bio list inside this rbio
530 * covers an entire stripe (no rmw required).
531 * Must be called with the bio list lock held, or
532 * at a time when you know it is impossible to add
533 * new bios into the list
535 static int __rbio_is_full(struct btrfs_raid_bio *rbio)
537 unsigned long size = rbio->bio_list_bytes;
540 if (size != rbio->nr_data * rbio->stripe_len)
543 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
547 static int rbio_is_full(struct btrfs_raid_bio *rbio)
552 spin_lock_irqsave(&rbio->bio_list_lock, flags);
553 ret = __rbio_is_full(rbio);
554 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
559 * returns 1 if it is safe to merge two rbios together.
560 * The merging is safe if the two rbios correspond to
561 * the same stripe and if they are both going in the same
562 * direction (read vs write), and if neither one is
563 * locked for final IO
565 * The caller is responsible for locking such that
566 * rmw_locked is safe to test
568 static int rbio_can_merge(struct btrfs_raid_bio *last,
569 struct btrfs_raid_bio *cur)
571 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
572 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
576 * we can't merge with cached rbios, since the
577 * idea is that when we merge the destination
578 * rbio is going to run our IO for us. We can
579 * steal from cached rbios though, other functions handle that
582 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
583 test_bit(RBIO_CACHE_BIT, &cur->flags))
586 if (last->bbio->raid_map[0] !=
587 cur->bbio->raid_map[0])
590 /* we can't merge with different operations */
591 if (last->operation != cur->operation)
594 * We need to read the full stripe from the drive, then check
595 * and repair the parity and write the new results.
597 * We're not allowed to add any new bios to the
598 * bio list here, anyone else that wants to
599 * change this stripe needs to do their own rmw.
601 if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
602 cur->operation == BTRFS_RBIO_PARITY_SCRUB)
605 if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
606 cur->operation == BTRFS_RBIO_REBUILD_MISSING)
612 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
615 return stripe * rbio->stripe_npages + index;
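/*
 * Layout sketch: stripe_pages[] is stripe-major, i.e. all pages of data
 * stripe 0, then data stripe 1, ..., then P (and Q for raid6). E.g. with
 * a 64KiB stripe and 4KiB pages (stripe_npages == 16), page 3 of stripe 2
 * sits at index 2 * 16 + 3 == 35.
 */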
619 * these are just the pages from the rbio array, not from anything
620 * the FS sent down to us
622 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
625 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
629 * helper to index into the pstripe
631 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
633 return rbio_stripe_page(rbio, rbio->nr_data, index);
637 * helper to index into the qstripe, returns null
638 * if there is no qstripe
640 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
642 if (rbio->nr_data + 1 == rbio->real_stripes)
644 return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
648 * The first stripe in the table for a logical address
649 * has the lock. rbios are added in one of three ways:
651 * 1) Nobody has the stripe locked yet. The rbio is given
652 * the lock and 0 is returned. The caller must start the IO
655 * 2) Someone has the stripe locked, but we're able to merge
656 * with the lock owner. The rbio is freed and the IO will
657 * start automatically along with the existing rbio. 1 is returned.
659 * 3) Someone has the stripe locked, but we're not able to merge.
660 * The rbio is added to the lock owner's plug list, or merged into
661 * an rbio already on the plug list. When the lock owner unlocks,
662 * the next rbio on the list is run and the IO is started automatically.
665 * If we return 0, the caller still owns the rbio and must continue with
666 * IO submission. If we return 1, the caller must assume the rbio has
667 * already been freed.
669 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
671 int bucket = rbio_bucket(rbio);
672 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
673 struct btrfs_raid_bio *cur;
674 struct btrfs_raid_bio *pending;
677 struct btrfs_raid_bio *freeit = NULL;
678 struct btrfs_raid_bio *cache_drop = NULL;
681 spin_lock_irqsave(&h->lock, flags);
682 list_for_each_entry(cur, &h->hash_list, hash_list) {
683 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
684 spin_lock(&cur->bio_list_lock);
686 /* can we steal this cached rbio's pages? */
687 if (bio_list_empty(&cur->bio_list) &&
688 list_empty(&cur->plug_list) &&
689 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
690 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
691 list_del_init(&cur->hash_list);
692 refcount_dec(&cur->refs);
694 steal_rbio(cur, rbio);
696 spin_unlock(&cur->bio_list_lock);
701 /* can we merge into the lock owner? */
702 if (rbio_can_merge(cur, rbio)) {
703 merge_rbio(cur, rbio);
704 spin_unlock(&cur->bio_list_lock);
712 * we couldn't merge with the running
713 * rbio, see if we can merge with the
714 * pending ones. We don't have to
715 * check for rmw_locked because there
716 * is no way they are inside finish_rmw
719 list_for_each_entry(pending, &cur->plug_list,
721 if (rbio_can_merge(pending, rbio)) {
722 merge_rbio(pending, rbio);
723 spin_unlock(&cur->bio_list_lock);
730 /* no merging, put us on the tail of the plug list,
731 * our rbio will be started with the currently
732 * running rbio unlocks
734 list_add_tail(&rbio->plug_list, &cur->plug_list);
735 spin_unlock(&cur->bio_list_lock);
741 refcount_inc(&rbio->refs);
742 list_add(&rbio->hash_list, &h->hash_list);
744 spin_unlock_irqrestore(&h->lock, flags);
746 remove_rbio_from_cache(cache_drop);
748 __free_raid_bio(freeit);
753 * called as rmw or parity rebuild is completed. If the plug list has more
754 * rbios waiting for this stripe, the next one on the list will be started
756 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
759 struct btrfs_stripe_hash *h;
763 bucket = rbio_bucket(rbio);
764 h = rbio->fs_info->stripe_hash_table->table + bucket;
766 if (list_empty(&rbio->plug_list))
769 spin_lock_irqsave(&h->lock, flags);
770 spin_lock(&rbio->bio_list_lock);
772 if (!list_empty(&rbio->hash_list)) {
774 * if we're still cached and there is no other IO
775 * to perform, just leave this rbio here for others
776 * to steal from later
778 if (list_empty(&rbio->plug_list) &&
779 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
781 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
782 BUG_ON(!bio_list_empty(&rbio->bio_list));
786 list_del_init(&rbio->hash_list);
787 refcount_dec(&rbio->refs);
790 * we use the plug list to hold all the rbios
791 * waiting for the chance to lock this stripe.
792 * hand the lock over to one of them.
794 if (!list_empty(&rbio->plug_list)) {
795 struct btrfs_raid_bio *next;
796 struct list_head *head = rbio->plug_list.next;
798 next = list_entry(head, struct btrfs_raid_bio,
801 list_del_init(&rbio->plug_list);
803 list_add(&next->hash_list, &h->hash_list);
804 refcount_inc(&next->refs);
805 spin_unlock(&rbio->bio_list_lock);
806 spin_unlock_irqrestore(&h->lock, flags);
808 if (next->operation == BTRFS_RBIO_READ_REBUILD)
809 async_read_rebuild(next);
810 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
811 steal_rbio(rbio, next);
812 async_read_rebuild(next);
813 } else if (next->operation == BTRFS_RBIO_WRITE) {
814 steal_rbio(rbio, next);
815 async_rmw_stripe(next);
816 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
817 steal_rbio(rbio, next);
818 async_scrub_parity(next);
823 * The barrier for this waitqueue_active is not needed,
824 * we're protected by h->lock and can't miss a wakeup.
826 } else if (waitqueue_active(&h->wait)) {
827 spin_unlock(&rbio->bio_list_lock);
828 spin_unlock_irqrestore(&h->lock, flags);
834 spin_unlock(&rbio->bio_list_lock);
835 spin_unlock_irqrestore(&h->lock, flags);
839 remove_rbio_from_cache(rbio);
842 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
846 if (!refcount_dec_and_test(&rbio->refs))
849 WARN_ON(!list_empty(&rbio->stripe_cache));
850 WARN_ON(!list_empty(&rbio->hash_list));
851 WARN_ON(!bio_list_empty(&rbio->bio_list));
853 for (i = 0; i < rbio->nr_pages; i++) {
854 if (rbio->stripe_pages[i]) {
855 __free_page(rbio->stripe_pages[i]);
856 rbio->stripe_pages[i] = NULL;
860 btrfs_put_bbio(rbio->bbio);
864 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
871 cur->bi_status = err;
878 * this frees the rbio and runs through all the bios in the
879 * bio_list and calls end_io on them
881 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
883 struct bio *cur = bio_list_get(&rbio->bio_list);
886 if (rbio->generic_bio_cnt)
887 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
889 * Clear the data bitmap, as the rbio may be cached for later usage.
890 * Do this before unlock_stripe() so there will be no new bio
893 bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages);
896 * At this moment, rbio->bio_list is empty, however since rbio does not
897 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
898 * hash list, rbio may be merged with others so that rbio->bio_list becomes non-empty.
900 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
901 * more and we can call bio_endio() on all queued bios.
904 extra = bio_list_get(&rbio->bio_list);
905 __free_raid_bio(rbio);
907 rbio_endio_bio_list(cur, err);
909 rbio_endio_bio_list(extra, err);
913 * end io function used by finish_rmw. When we finally
914 * get here, we've written a full stripe
916 static void raid_write_end_io(struct bio *bio)
918 struct btrfs_raid_bio *rbio = bio->bi_private;
919 blk_status_t err = bio->bi_status;
923 fail_bio_stripe(rbio, bio);
927 if (!atomic_dec_and_test(&rbio->stripes_pending))
932 /* OK, we have written all the stripes we need to. */
933 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
934 0 : rbio->bbio->max_errors;
935 if (atomic_read(&rbio->error) > max_errors)
938 rbio_orig_end_io(rbio, err);
942 * the read/modify/write code wants to use the original bio for
943 * any pages it included, and then use the rbio for everything
944 * else. This function decides if a given index (stripe number)
945 * and page number in that stripe fall inside the original bio
948 * if you set bio_list_only, you'll get a NULL back for any ranges
949 * that are outside the bio_list
951 * This doesn't take any refs on anything, you get a bare page pointer
952 * and the caller must bump refs as required.
954 * You must call index_rbio_pages once before you can trust
955 * the answers from this function.
957 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
958 int index, int pagenr, int bio_list_only)
961 struct page *p = NULL;
963 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
965 spin_lock_irq(&rbio->bio_list_lock);
966 p = rbio->bio_pages[chunk_page];
967 spin_unlock_irq(&rbio->bio_list_lock);
969 if (p || bio_list_only)
972 return rbio->stripe_pages[chunk_page];
976 * number of pages we need for the entire stripe across all the
979 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
981 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
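/*
 * E.g. assuming 4KiB pages: a 64KiB stripe_len across 3 stripes
 * (2 data + P) needs DIV_ROUND_UP(64K, 4K) * 3 == 48 pages.
 */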
985 * allocation and initial setup for the btrfs_raid_bio. Note that
986 * this does not allocate any pages for rbio->pages.
988 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
989 struct btrfs_bio *bbio,
992 struct btrfs_raid_bio *rbio;
994 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
995 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
996 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
999 rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
1000 DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
1001 sizeof(long), GFP_NOFS);
1003 return ERR_PTR(-ENOMEM);
1005 bio_list_init(&rbio->bio_list);
1006 INIT_LIST_HEAD(&rbio->plug_list);
1007 spin_lock_init(&rbio->bio_list_lock);
1008 INIT_LIST_HEAD(&rbio->stripe_cache);
1009 INIT_LIST_HEAD(&rbio->hash_list);
1011 rbio->fs_info = fs_info;
1012 rbio->stripe_len = stripe_len;
1013 rbio->nr_pages = num_pages;
1014 rbio->real_stripes = real_stripes;
1015 rbio->stripe_npages = stripe_npages;
1018 refcount_set(&rbio->refs, 1);
1019 atomic_set(&rbio->error, 0);
1020 atomic_set(&rbio->stripes_pending, 0);
1023 * the stripe_pages and bio_pages array point to the extra
1024 * memory we allocated past the end of the rbio
1027 rbio->stripe_pages = p;
1028 rbio->bio_pages = p + sizeof(struct page *) * num_pages;
1029 rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
1031 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1032 nr_data = real_stripes - 1;
1033 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1034 nr_data = real_stripes - 2;
1038 rbio->nr_data = nr_data;
1042 /* allocate pages for all the stripes in the bio, including parity */
1043 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1048 for (i = 0; i < rbio->nr_pages; i++) {
1049 if (rbio->stripe_pages[i])
1051 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1054 rbio->stripe_pages[i] = page;
1059 /* only allocate pages for p/q stripes */
1060 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1065 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1067 for (; i < rbio->nr_pages; i++) {
1068 if (rbio->stripe_pages[i])
1070 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1073 rbio->stripe_pages[i] = page;
1079 * add a single page from a specific stripe into our list of bios for IO
1080 * this will try to merge into existing bios if possible, and returns
1081 * zero if all went well.
1083 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1084 struct bio_list *bio_list,
1087 unsigned long page_index,
1088 unsigned long bio_max_len)
1090 struct bio *last = bio_list->tail;
1094 struct btrfs_bio_stripe *stripe;
1097 stripe = &rbio->bbio->stripes[stripe_nr];
1098 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1100 /* if the device is missing, just fail this stripe */
1101 if (!stripe->dev->bdev)
1102 return fail_rbio_index(rbio, stripe_nr);
1104 /* see if we can add this page onto our existing bio */
1106 last_end = (u64)last->bi_iter.bi_sector << 9;
1107 last_end += last->bi_iter.bi_size;
1110 * we can't merge these if they are from different
1111 * devices or if they are not contiguous
1113 if (last_end == disk_start && stripe->dev->bdev &&
1115 last->bi_disk == stripe->dev->bdev->bd_disk &&
1116 last->bi_partno == stripe->dev->bdev->bd_partno) {
1117 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1118 if (ret == PAGE_SIZE)
1123 /* put a new bio on the list */
1124 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1125 bio->bi_iter.bi_size = 0;
1126 bio_set_dev(bio, stripe->dev->bdev);
1127 bio->bi_iter.bi_sector = disk_start >> 9;
1129 bio_add_page(bio, page, PAGE_SIZE, 0);
1130 bio_list_add(bio_list, bio);
1135 * while we're doing the read/modify/write cycle, we could
1136 * have errors in reading pages off the disk. This checks
1137 * for errors and if we're not able to read the page it'll
1138 * trigger parity reconstruction. The rmw will be finished
1139 * after we've reconstructed the failed stripes
1141 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1143 if (rbio->faila >= 0 || rbio->failb >= 0) {
1144 BUG_ON(rbio->faila == rbio->real_stripes - 1);
1145 __raid56_parity_recover(rbio);
1152 * helper function to walk our bio list and populate the bio_pages array with
1153 * the result. This seems expensive, but it is faster than constantly
1154 * searching through the bio list as we set up the IO in finish_rmw or stripe reconstruction
1157 * This must be called before you trust the answers from page_in_rbio
1159 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1163 unsigned long stripe_offset;
1164 unsigned long page_index;
1166 spin_lock_irq(&rbio->bio_list_lock);
1167 bio_list_for_each(bio, &rbio->bio_list) {
1168 struct bio_vec bvec;
1169 struct bvec_iter iter;
1172 start = (u64)bio->bi_iter.bi_sector << 9;
1173 stripe_offset = start - rbio->bbio->raid_map[0];
1174 page_index = stripe_offset >> PAGE_SHIFT;
1176 if (bio_flagged(bio, BIO_CLONED))
1177 bio->bi_iter = btrfs_io_bio(bio)->iter;
1179 bio_for_each_segment(bvec, bio, iter) {
1180 rbio->bio_pages[page_index + i] = bvec.bv_page;
1184 spin_unlock_irq(&rbio->bio_list_lock);
1188 * this is called from one of two situations. We either
1189 * have a full stripe from the higher layers, or we've read all
1190 * the missing bits off disk.
1192 * This will calculate the parity and then send down any writes that are required
1195 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1197 struct btrfs_bio *bbio = rbio->bbio;
1198 void *pointers[rbio->real_stripes];
1199 int nr_data = rbio->nr_data;
1203 struct bio_list bio_list;
1207 bio_list_init(&bio_list);
1209 if (rbio->real_stripes - rbio->nr_data == 1)
1210 has_qstripe = false;
1211 else if (rbio->real_stripes - rbio->nr_data == 2)
1216 /* We should have at least one data sector. */
1217 ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages));
1219 /* at this point we either have a full stripe,
1220 * or we've read the full stripe from the drive.
1221 * recalculate the parity and write the new results.
1223 * We're not allowed to add any new bios to the
1224 * bio list here, anyone else that wants to
1225 * change this stripe needs to do their own rmw.
1227 spin_lock_irq(&rbio->bio_list_lock);
1228 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1229 spin_unlock_irq(&rbio->bio_list_lock);
1231 atomic_set(&rbio->error, 0);
1234 * now that we've set rmw_locked, run through the
1235 * bio list one last time and map the page pointers
1237 * We don't cache full rbios because we're assuming
1238 * the higher layers are unlikely to use this area of
1239 * the disk again soon. If they do use it again,
1240 * hopefully they will send another full bio.
1242 index_rbio_pages(rbio);
1243 if (!rbio_is_full(rbio))
1244 cache_rbio_pages(rbio);
1246 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1248 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1250 /* first collect one page from each data stripe */
1251 for (stripe = 0; stripe < nr_data; stripe++) {
1252 p = page_in_rbio(rbio, stripe, pagenr, 0);
1253 pointers[stripe] = kmap(p);
1256 /* then add the parity stripe */
1257 p = rbio_pstripe_page(rbio, pagenr);
1259 pointers[stripe++] = kmap(p);
1264 * raid6, add the qstripe and call the
1265 * library function to fill in our p/q
1267 p = rbio_qstripe_page(rbio, pagenr);
1269 pointers[stripe++] = kmap(p);
1271 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1275 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1276 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
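/*
 * The memcpy above seeds the parity page with D0, and run_xor()
 * (sources pointers[1..nr_data-1], destination pointers[nr_data])
 * folds in the remaining data pages, leaving the P page holding
 * D0 ^ D1 ^ ... ^ D(nr_data-1).
 */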
1280 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1281 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1285 * time to start writing. Make bios for everything from the
1286 * higher layers (the bio_list in our rbio) and our p/q. Ignore everything else
1289 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1290 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1293 /* This vertical stripe has no data, skip it. */
1294 if (!test_bit(pagenr, rbio->dbitmap))
1297 if (stripe < rbio->nr_data) {
1298 page = page_in_rbio(rbio, stripe, pagenr, 1);
1302 page = rbio_stripe_page(rbio, stripe, pagenr);
1305 ret = rbio_add_io_page(rbio, &bio_list,
1306 page, stripe, pagenr, rbio->stripe_len);
1312 if (likely(!bbio->num_tgtdevs))
1315 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1316 if (!bbio->tgtdev_map[stripe])
1319 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1322 /* This vertical stripe has no data, skip it. */
1323 if (!test_bit(pagenr, rbio->dbitmap))
1326 if (stripe < rbio->nr_data) {
1327 page = page_in_rbio(rbio, stripe, pagenr, 1);
1331 page = rbio_stripe_page(rbio, stripe, pagenr);
1334 ret = rbio_add_io_page(rbio, &bio_list, page,
1335 rbio->bbio->tgtdev_map[stripe],
1336 pagenr, rbio->stripe_len);
1343 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1344 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1347 bio = bio_list_pop(&bio_list);
1351 bio->bi_private = rbio;
1352 bio->bi_end_io = raid_write_end_io;
1353 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1360 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1364 * helper to find the stripe number for a given bio. Used to figure out which
1365 * stripe has failed. This expects the bio to correspond to a physical disk,
1366 * so it looks up based on physical sector numbers.
1368 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1371 u64 physical = bio->bi_iter.bi_sector;
1374 struct btrfs_bio_stripe *stripe;
1378 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1379 stripe = &rbio->bbio->stripes[i];
1380 stripe_start = stripe->physical;
1381 if (physical >= stripe_start &&
1382 physical < stripe_start + rbio->stripe_len &&
1383 stripe->dev->bdev &&
1384 bio->bi_disk == stripe->dev->bdev->bd_disk &&
1385 bio->bi_partno == stripe->dev->bdev->bd_partno) {
1393 * helper to find the stripe number for a given
1394 * bio (before mapping). Used to figure out which stripe has
1395 * failed. This looks up based on logical block numbers.
1397 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1400 u64 logical = bio->bi_iter.bi_sector;
1406 for (i = 0; i < rbio->nr_data; i++) {
1407 stripe_start = rbio->bbio->raid_map[i];
1408 if (logical >= stripe_start &&
1409 logical < stripe_start + rbio->stripe_len) {
1417 * returns -EIO if we had too many failures
1419 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1421 unsigned long flags;
1424 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1426 /* we already know this stripe is bad, move on */
1427 if (rbio->faila == failed || rbio->failb == failed)
1430 if (rbio->faila == -1) {
1431 /* first failure on this rbio */
1432 rbio->faila = failed;
1433 atomic_inc(&rbio->error);
1434 } else if (rbio->failb == -1) {
1435 /* second failure on this rbio */
1436 rbio->failb = failed;
1437 atomic_inc(&rbio->error);
1442 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1448 * helper to fail a stripe based on a physical disk
1451 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1454 int failed = find_bio_stripe(rbio, bio);
1459 return fail_rbio_index(rbio, failed);
1463 * this sets each page in the bio uptodate. It should only be used on private
1464 * rbio pages, nothing that comes in from the higher layers
1466 static void set_bio_pages_uptodate(struct bio *bio)
1468 struct bio_vec *bvec;
1471 ASSERT(!bio_flagged(bio, BIO_CLONED));
1473 bio_for_each_segment_all(bvec, bio, i)
1474 SetPageUptodate(bvec->bv_page);
1478 * end io for the read phase of the rmw cycle. All the bios here are physical
1479 * stripe bios we've read from the disk so we can recalculate the parity of the
1482 * This will usually kick off finish_rmw once all the bios are read in, but it
1483 * may trigger parity reconstruction if we had any errors along the way
1485 static void raid_rmw_end_io(struct bio *bio)
1487 struct btrfs_raid_bio *rbio = bio->bi_private;
1490 fail_bio_stripe(rbio, bio);
1492 set_bio_pages_uptodate(bio);
1496 if (!atomic_dec_and_test(&rbio->stripes_pending))
1499 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1503 * this will normally call finish_rmw to start our write
1504 * but if there are any failed stripes we'll reconstruct from parity first
1507 validate_rbio_for_rmw(rbio);
1512 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1515 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1517 btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
1518 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1521 static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1523 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1524 read_rebuild_work, NULL, NULL);
1526 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1530 * the stripe must be locked by the caller. It will
1531 * unlock after all the writes are done
1533 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1535 int bios_to_read = 0;
1536 struct bio_list bio_list;
1542 bio_list_init(&bio_list);
1544 ret = alloc_rbio_pages(rbio);
1548 index_rbio_pages(rbio);
1550 atomic_set(&rbio->error, 0);
1552 * build a list of bios to read all the missing parts of this stripe
1555 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1556 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1559 * we want to find all the pages missing from
1560 * the rbio and read them from the disk. If
1561 * page_in_rbio finds a page in the bio list
1562 * we don't need to read it off the stripe.
1564 page = page_in_rbio(rbio, stripe, pagenr, 1);
1568 page = rbio_stripe_page(rbio, stripe, pagenr);
1570 * the bio cache may have handed us an uptodate
1571 * page. If so, be happy and use it
1573 if (PageUptodate(page))
1576 ret = rbio_add_io_page(rbio, &bio_list, page,
1577 stripe, pagenr, rbio->stripe_len);
1583 bios_to_read = bio_list_size(&bio_list);
1584 if (!bios_to_read) {
1586 * this can happen if others have merged with
1587 * us, it means there is nothing left to read.
1588 * But if there are missing devices it may not be
1589 * safe to do the full stripe write yet.
1595 * the bbio may be freed once we submit the last bio. Make sure
1596 * not to touch it after that
1598 atomic_set(&rbio->stripes_pending, bios_to_read);
1600 bio = bio_list_pop(&bio_list);
1604 bio->bi_private = rbio;
1605 bio->bi_end_io = raid_rmw_end_io;
1606 bio_set_op_attrs(bio, REQ_OP_READ, 0);
1608 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1612 /* the actual write will happen once the reads are done */
1616 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1620 validate_rbio_for_rmw(rbio);
1625 * if the upper layers pass in a full stripe, we thank them by only allocating
1626 * enough pages to hold the parity, and sending it all down quickly.
1628 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1632 ret = alloc_rbio_parity_pages(rbio);
1634 __free_raid_bio(rbio);
1638 ret = lock_stripe_add(rbio);
1645 * partial stripe writes get handed over to async helpers.
1646 * We're really hoping to merge a few more writes into this
1647 * rbio before calculating new parity
1649 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1653 ret = lock_stripe_add(rbio);
1655 async_rmw_stripe(rbio);
1660 * sometimes while we were reading from the drive to
1661 * recalculate parity, enough new bios come in to create
1662 * a full stripe. So we do a check here to see if we can
1663 * go directly to finish_rmw
1665 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1667 /* head off into rmw land if we don't have a full stripe */
1668 if (!rbio_is_full(rbio))
1669 return partial_stripe_write(rbio);
1670 return full_stripe_write(rbio);
1674 * We use plugging callbacks to collect full stripes.
1675 * Any time we get a partial stripe write while plugged
1676 * we collect it into a list. When the unplug comes down,
1677 * we sort the list by logical block number and merge
1678 * everything we can into the same rbios
1680 struct btrfs_plug_cb {
1681 struct blk_plug_cb cb;
1682 struct btrfs_fs_info *info;
1683 struct list_head rbio_list;
1684 struct btrfs_work work;
1688 * rbios on the plug list are sorted for easier merging.
1690 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1692 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1694 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1696 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1697 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1699 if (a_sector < b_sector)
1701 if (a_sector > b_sector)
1706 static void run_plug(struct btrfs_plug_cb *plug)
1708 struct btrfs_raid_bio *cur;
1709 struct btrfs_raid_bio *last = NULL;
1712 * sort our plug list then try to merge
1713 * everything we can in hopes of creating full stripes
1716 list_sort(NULL, &plug->rbio_list, plug_cmp);
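/*
 * After sorting by starting sector, rbios that belong to the same full
 * stripe end up adjacent, so the pairwise rbio_can_merge()/merge_rbio()
 * below is enough to combine every mergeable pair.
 */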
1717 while (!list_empty(&plug->rbio_list)) {
1718 cur = list_entry(plug->rbio_list.next,
1719 struct btrfs_raid_bio, plug_list);
1720 list_del_init(&cur->plug_list);
1722 if (rbio_is_full(cur)) {
1723 /* we have a full stripe, send it down */
1724 full_stripe_write(cur);
1728 if (rbio_can_merge(last, cur)) {
1729 merge_rbio(last, cur);
1730 __free_raid_bio(cur);
1734 __raid56_parity_write(last);
1739 __raid56_parity_write(last);
1745 * if the unplug comes from schedule, we have to push the
1746 * work off to a helper thread
1748 static void unplug_work(struct btrfs_work *work)
1750 struct btrfs_plug_cb *plug;
1751 plug = container_of(work, struct btrfs_plug_cb, work);
1755 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1757 struct btrfs_plug_cb *plug;
1758 plug = container_of(cb, struct btrfs_plug_cb, cb);
1760 if (from_schedule) {
1761 btrfs_init_work(&plug->work, btrfs_rmw_helper,
1762 unplug_work, NULL, NULL);
1763 btrfs_queue_work(plug->info->rmw_workers,
1770 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1771 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1773 const struct btrfs_fs_info *fs_info = rbio->fs_info;
1774 const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1775 const u64 full_stripe_start = rbio->bbio->raid_map[0];
1776 const u32 orig_len = orig_bio->bi_iter.bi_size;
1777 const u32 sectorsize = fs_info->sectorsize;
1780 ASSERT(orig_logical >= full_stripe_start &&
1781 orig_logical + orig_len <= full_stripe_start +
1782 rbio->nr_data * rbio->stripe_len);
1784 bio_list_add(&rbio->bio_list, orig_bio);
1785 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1787 /* Update the dbitmap. */
1788 for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1789 cur_logical += sectorsize) {
1790 int bit = ((u32)(cur_logical - full_stripe_start) >>
1791 PAGE_SHIFT) % rbio->stripe_npages;
1793 set_bit(bit, rbio->dbitmap);
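/*
 * E.g. assuming 4KiB pages and a 64KiB stripe (stripe_npages == 16):
 * a sector 68KiB past full_stripe_start is data page 17, so bit
 * 17 % 16 == 1 is set for that page row of the full stripe.
 */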
1798 * our main entry point for writes from the rest of the FS.
1800 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1801 struct btrfs_bio *bbio, u64 stripe_len)
1803 struct btrfs_raid_bio *rbio;
1804 struct btrfs_plug_cb *plug = NULL;
1805 struct blk_plug_cb *cb;
1808 rbio = alloc_rbio(fs_info, bbio, stripe_len);
1810 btrfs_put_bbio(bbio);
1811 return PTR_ERR(rbio);
1813 rbio->operation = BTRFS_RBIO_WRITE;
1814 rbio_add_bio(rbio, bio);
1816 btrfs_bio_counter_inc_noblocked(fs_info);
1817 rbio->generic_bio_cnt = 1;
1820 * don't plug on full rbios, just get them out the door
1821 * as quickly as we can
1823 if (rbio_is_full(rbio)) {
1824 ret = full_stripe_write(rbio);
1826 btrfs_bio_counter_dec(fs_info);
1830 cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1832 plug = container_of(cb, struct btrfs_plug_cb, cb);
1834 plug->info = fs_info;
1835 INIT_LIST_HEAD(&plug->rbio_list);
1837 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1840 ret = __raid56_parity_write(rbio);
1842 btrfs_bio_counter_dec(fs_info);
1848 * all parity reconstruction happens here. We've read in everything
1849 * we can find from the drives and this does the heavy lifting of
1850 * sorting the good from the bad.
1852 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1856 int faila = -1, failb = -1;
1861 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1863 err = BLK_STS_RESOURCE;
1867 faila = rbio->faila;
1868 failb = rbio->failb;
1870 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1871 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1872 spin_lock_irq(&rbio->bio_list_lock);
1873 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1874 spin_unlock_irq(&rbio->bio_list_lock);
1877 index_rbio_pages(rbio);
1879 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1881 * Now we just use bitmap to mark the horizontal stripes in
1882 * which we have data when doing parity scrub.
1884 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1885 !test_bit(pagenr, rbio->dbitmap))
1888 /* set up our array of pointers with pages from each stripe
1891 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1893 * if we're rebuilding a read, we have to use
1894 * pages from the bio list
1896 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1897 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1898 (stripe == faila || stripe == failb)) {
1899 page = page_in_rbio(rbio, stripe, pagenr, 0);
1901 page = rbio_stripe_page(rbio, stripe, pagenr);
1903 pointers[stripe] = kmap(page);
1906 /* all raid6 handling here */
1907 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1909 * single failure, rebuild from parity raid5
1913 if (faila == rbio->nr_data) {
1915 * Just the P stripe has failed, without
1916 * a bad data or Q stripe.
1917 * TODO, we should redo the xor here.
1919 err = BLK_STS_IOERR;
1923 * a single failure in raid6 is rebuilt
1924 * in the pstripe code below
1929 /* make sure our ps and qs are in order */
1930 if (faila > failb) {
1936 /* if the q stripe is failed, do a pstripe reconstruction from the xors.
1938 * If both the q stripe and the P stripe are failed, we're
1939 * here due to a crc mismatch and we can't give them the data they want
1942 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1943 if (rbio->bbio->raid_map[faila] ==
1945 err = BLK_STS_IOERR;
1949 * otherwise we have one bad data stripe and
1950 * a good P stripe. raid5!
1955 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1956 raid6_datap_recov(rbio->real_stripes,
1957 PAGE_SIZE, faila, pointers);
1959 raid6_2data_recov(rbio->real_stripes,
1960 PAGE_SIZE, faila, failb,
1966 /* rebuild from P stripe here (raid5 or raid6) */
1967 BUG_ON(failb != -1);
1969 /* Copy parity block into failed block to start with */
1970 memcpy(pointers[faila],
1971 pointers[rbio->nr_data],
1974 /* rearrange the pointer array */
1975 p = pointers[faila];
1976 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1977 pointers[stripe] = pointers[stripe + 1];
1978 pointers[rbio->nr_data - 1] = p;
1980 /* xor in the rest */
1981 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
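/*
 * pointers[faila] was seeded with P and rotated to the tail of the
 * data pointers, so run_xor() leaves it holding P ^ (all surviving
 * data pages), which is exactly the missing data page.
 */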
1983 /* if we're doing this rebuild as part of an rmw, go through
1984 * and set all of our private rbio pages in the
1985 * failed stripes as uptodate. This way finish_rmw will
1986 * know they can be trusted. If this was a read reconstruction,
1987 * other endio functions will fiddle the uptodate bits
1989 if (rbio->operation == BTRFS_RBIO_WRITE) {
1990 for (i = 0; i < rbio->stripe_npages; i++) {
1992 page = rbio_stripe_page(rbio, faila, i);
1993 SetPageUptodate(page);
1996 page = rbio_stripe_page(rbio, failb, i);
1997 SetPageUptodate(page);
2001 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2003 * if we're rebuilding a read, we have to use
2004 * pages from the bio list
2006 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2007 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
2008 (stripe == faila || stripe == failb)) {
2009 page = page_in_rbio(rbio, stripe, pagenr, 0);
2011 page = rbio_stripe_page(rbio, stripe, pagenr);
2022 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
2023 if (err == BLK_STS_OK)
2024 cache_rbio_pages(rbio);
2026 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2028 rbio_orig_end_io(rbio, err);
2029 } else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
2030 rbio_orig_end_io(rbio, err);
2031 } else if (err == BLK_STS_OK) {
2035 if (rbio->operation == BTRFS_RBIO_WRITE)
2037 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2038 finish_parity_scrub(rbio, 0);
2042 rbio_orig_end_io(rbio, err);
2047 * This is called only for stripes we've read from disk to
2048 * reconstruct the parity.
2050 static void raid_recover_end_io(struct bio *bio)
2052 struct btrfs_raid_bio *rbio = bio->bi_private;
2055 * we only read stripe pages off the disk, set them
2056 * up to date if there were no errors
2059 fail_bio_stripe(rbio, bio);
2061 set_bio_pages_uptodate(bio);
2064 if (!atomic_dec_and_test(&rbio->stripes_pending))
2067 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2068 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2070 __raid_recover_end_io(rbio);
2074 * reads everything we need off the disk to reconstruct
2075 * the parity. endio handlers trigger final reconstruction
2076 * when the IO is done.
2078 * This is used both for reads from the higher layers and for
2079 * parity construction required to finish a rmw cycle.
2081 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2083 int bios_to_read = 0;
2084 struct bio_list bio_list;
2090 bio_list_init(&bio_list);
2092 ret = alloc_rbio_pages(rbio);
2096 atomic_set(&rbio->error, 0);
2099 * Read everything that hasn't failed. However this time we will
2100 * not trust any cached sector.
2101 * The cache may hold stale data that the higher layer is never going to read back,
2104 * so here we always re-read everything in the recovery path.
2106 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2107 if (rbio->faila == stripe || rbio->failb == stripe) {
2108 atomic_inc(&rbio->error);
2112 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2113 ret = rbio_add_io_page(rbio, &bio_list,
2114 rbio_stripe_page(rbio, stripe, pagenr),
2115 stripe, pagenr, rbio->stripe_len);
2121 bios_to_read = bio_list_size(&bio_list);
2122 if (!bios_to_read) {
2124 * we might have no bios to read just because the pages
2125 * were up to date, or we might have no bios to read because
2126 * the devices were gone.
2128 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2129 __raid_recover_end_io(rbio);
2137 * the bbio may be freed once we submit the last bio. Make sure
2138 * not to touch it after that
2140 atomic_set(&rbio->stripes_pending, bios_to_read);
2142 bio = bio_list_pop(&bio_list);
2146 bio->bi_private = rbio;
2147 bio->bi_end_io = raid_recover_end_io;
2148 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2150 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2158 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2159 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2160 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2165 * the main entry point for reads from the higher layers. This
2166 * is really only called when the normal read path had a failure,
2167 * so we assume the bio they send down corresponds to a failed part of the stripe
2170 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2171 struct btrfs_bio *bbio, u64 stripe_len,
2172 int mirror_num, int generic_io)
2174 struct btrfs_raid_bio *rbio;
2178 ASSERT(bbio->mirror_num == mirror_num);
2179 btrfs_io_bio(bio)->mirror_num = mirror_num;
2182 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2185 btrfs_put_bbio(bbio);
2186 return PTR_ERR(rbio);
2189 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2190 rbio_add_bio(rbio, bio);
2192 rbio->faila = find_logical_bio_stripe(rbio, bio);
2193 if (rbio->faila == -1) {
2195 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2196 __func__, (u64)bio->bi_iter.bi_sector << 9,
2197 (u64)bio->bi_iter.bi_size, bbio->map_type);
2199 btrfs_put_bbio(bbio);
2205 btrfs_bio_counter_inc_noblocked(fs_info);
2206 rbio->generic_bio_cnt = 1;
2208 btrfs_get_bbio(bbio);
2213 * for 'mirror == 2', reconstruct from all other stripes.
2214 * for 'mirror_num > 2', select a stripe to fail on every retry.
2216 if (mirror_num > 2) {
2218 * 'mirror == 3' is to fail the p stripe and
2219 * reconstruct from the q stripe. 'mirror > 3' is to
2220 * fail a data stripe and reconstruct from p+q stripe.
2222 rbio->failb = rbio->real_stripes - (mirror_num - 1);
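/*
 * E.g. for raid6 with 2 data stripes + P + Q (real_stripes == 4):
 * mirror_num == 3 gives failb == 2, the P stripe; mirror_num == 4
 * gives failb == 1, a data stripe (adjusted below if it would
 * collide with faila).
 */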
2223 ASSERT(rbio->failb > 0);
2224 if (rbio->failb <= rbio->faila)
2228 ret = lock_stripe_add(rbio);
2231 * __raid56_parity_recover will end the bio with
2232 * any errors it hits. We don't want to return
2233 * its error value up the stack because our caller
2234 * will end up calling bio_endio with any nonzero
2238 __raid56_parity_recover(rbio);
2240 * our rbio has been added to the list of
2241 * rbios that will be handled after the
2242 * currently lock owner is done
2248 static void rmw_work(struct btrfs_work *work)
2250 struct btrfs_raid_bio *rbio;
2252 rbio = container_of(work, struct btrfs_raid_bio, work);
2253 raid56_rmw_stripe(rbio);
2256 static void read_rebuild_work(struct btrfs_work *work)
2258 struct btrfs_raid_bio *rbio;
2260 rbio = container_of(work, struct btrfs_raid_bio, work);
2261 __raid56_parity_recover(rbio);
2265 * The following code is used to scrub/replace the parity stripe
2267 * Caller must have already increased bio_counter for getting @bbio.
2269 * Note: We need to make sure that all the pages added into the scrub/replace
2270 * raid bio are correct and will not be changed during the scrub/replace. That
2271 * is, those pages just hold metadata or file data with checksums.
2274 struct btrfs_raid_bio *
2275 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2276 struct btrfs_bio *bbio, u64 stripe_len,
2277 struct btrfs_device *scrub_dev,
2278 unsigned long *dbitmap, int stripe_nsectors)
2280 struct btrfs_raid_bio *rbio;
2283 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2286 bio_list_add(&rbio->bio_list, bio);
2288 * This is a special bio which is used to hold the completion handler
2289 * and to make the scrub rbio similar to the other types
2291 ASSERT(!bio->bi_iter.bi_size);
2292 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2294 for (i = 0; i < rbio->real_stripes; i++) {
2295 if (bbio->stripes[i].dev == scrub_dev) {
2301 /* For now we only support the case where sectorsize equals page size */
2302 ASSERT(fs_info->sectorsize == PAGE_SIZE);
2303 ASSERT(rbio->stripe_npages == stripe_nsectors);
2304 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2307 * We have already increased bio_counter when getting bbio, record it
2308 * so we can free it at rbio_orig_end_io().
2310 rbio->generic_bio_cnt = 1;
2315 /* Used for both parity scrub and missing. */
2316 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2322 ASSERT(logical >= rbio->bbio->raid_map[0]);
2323 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2324 rbio->stripe_len * rbio->nr_data);
2325 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2326 index = stripe_offset >> PAGE_SHIFT;
2327 rbio->bio_pages[index] = page;
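/*
 * bio_pages[] is indexed by page offset within the data portion of the
 * full stripe, so (assuming 4KiB pages) a page at logical
 * raid_map[0] + 20KiB is stored at index 5.
 */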
2331 * We only scrub the parity for the horizontal stripes where we have correct data,
2332 * so we needn't allocate all pages for all the stripes.
2334 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2341 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2342 for (i = 0; i < rbio->real_stripes; i++) {
2343 index = i * rbio->stripe_npages + bit;
2344 if (rbio->stripe_pages[index])
2347 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2350 rbio->stripe_pages[index] = page;
2356 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2359 struct btrfs_bio *bbio = rbio->bbio;
2360 void *pointers[rbio->real_stripes];
2361 DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
2362 int nr_data = rbio->nr_data;
2366 struct page *p_page = NULL;
2367 struct page *q_page = NULL;
2368 struct bio_list bio_list;
2373 bio_list_init(&bio_list);
2375 if (rbio->real_stripes - rbio->nr_data == 1)
2376 has_qstripe = false;
2377 else if (rbio->real_stripes - rbio->nr_data == 2)
2382 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2384 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2388 * The higher layers (the scrubber) are unlikely to
2389 * use this area of the disk again soon, so don't cache it.
2392 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2397 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2400 SetPageUptodate(p_page);
2403 /* RAID6, allocate and map temp space for the Q stripe */
2404 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2406 __free_page(p_page);
2409 SetPageUptodate(q_page);
2410 pointers[rbio->real_stripes - 1] = kmap(q_page);
2413 atomic_set(&rbio->error, 0);
2415 /* Map the parity stripe just once */
2416 pointers[nr_data] = kmap(p_page);
2418 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2421 /* first collect one page from each data stripe */
2422 for (stripe = 0; stripe < nr_data; stripe++) {
2423 p = page_in_rbio(rbio, stripe, pagenr, 0);
2424 pointers[stripe] = kmap(p);
2428 /* RAID6, call the library function to fill in our P/Q */
2429 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2433 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2434 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2437 /* Check scrubbing parity and repair it */
2438 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2440 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2441 memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
2443 /* Parity is right, needn't writeback */
2444 bitmap_clear(rbio->dbitmap, pagenr, 1);
2447 for (stripe = 0; stripe < nr_data; stripe++)
2448 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2452 __free_page(p_page);
2455 __free_page(q_page);
2460 * time to start writing. Make bios for everything from the
2461 * higher layers (the bio_list in our rbio) and our p/q. Ignore everything else
2464 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2467 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2468 ret = rbio_add_io_page(rbio, &bio_list,
2469 page, rbio->scrubp, pagenr, rbio->stripe_len);
2477 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2480 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2481 ret = rbio_add_io_page(rbio, &bio_list, page,
2482 bbio->tgtdev_map[rbio->scrubp],
2483 pagenr, rbio->stripe_len);
2489 nr_data = bio_list_size(&bio_list);
2491 /* Every parity is right */
2492 rbio_orig_end_io(rbio, BLK_STS_OK);
2496 atomic_set(&rbio->stripes_pending, nr_data);
2499 bio = bio_list_pop(&bio_list);
2503 bio->bi_private = rbio;
2504 bio->bi_end_io = raid_write_end_io;
2505 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2512 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2515 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2517 if (stripe >= 0 && stripe < rbio->nr_data)
2523 * While we're doing the parity check and repair, we could have errors
2524 * in reading pages off the disk. This checks for errors and if we're
2525 * not able to read the page it'll trigger parity reconstruction. The
2526 * parity scrub will be finished after we've reconstructed the failed stripes
2529 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2531 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2534 if (rbio->faila >= 0 || rbio->failb >= 0) {
2535 int dfail = 0, failp = -1;
2537 if (is_data_stripe(rbio, rbio->faila))
2539 else if (is_parity_stripe(rbio->faila))
2540 failp = rbio->faila;
2542 if (is_data_stripe(rbio, rbio->failb))
2544 else if (is_parity_stripe(rbio->failb))
2545 failp = rbio->failb;
2548 * Because we cannot use the parity that is being scrubbed to repair
2549 * the data, our capability to repair is reduced
2550 * (in the case of RAID5, we cannot repair anything).
2552 if (dfail > rbio->bbio->max_errors - 1)
2556 * If all the data is good and only the parity is bad, just
2557 * repair the parity.
2560 finish_parity_scrub(rbio, 0);
2565 * Here we have one corrupted data stripe and one corrupted
2566 * parity on RAID6. If the corrupted parity is the one being
2567 * scrubbed, we can luckily use the other parity to repair
2568 * the data; otherwise we cannot repair the data stripe.
2570 if (failp != rbio->scrubp)
2573 __raid_recover_end_io(rbio);
2575 finish_parity_scrub(rbio, 1);
2580 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2584 * end io for the read phase of the scrub cycle. All the bios here are physical
2585 * stripe bios we've read from the disk so we can recalculate the parity of the
2588 * This will usually kick off finish_parity_scrub once all the bios are read in,
2589 * but it may trigger parity reconstruction if we had any errors along the way
2591 static void raid56_parity_scrub_end_io(struct bio *bio)
2593 struct btrfs_raid_bio *rbio = bio->bi_private;
2596 fail_bio_stripe(rbio, bio);
2598 set_bio_pages_uptodate(bio);
2602 if (!atomic_dec_and_test(&rbio->stripes_pending))
2606 * this will normally call finish_rmw to start our write
2607 * but if there are any failed stripes we'll reconstruct from parity first
2610 validate_rbio_for_parity_scrub(rbio);
2613 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2615 int bios_to_read = 0;
2616 struct bio_list bio_list;
2622 ret = alloc_rbio_essential_pages(rbio);
2626 bio_list_init(&bio_list);
2628 atomic_set(&rbio->error, 0);
2630 * build a list of bios to read all the missing parts of this stripe
2633 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2634 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2637 * we want to find all the pages missing from
2638 * the rbio and read them from the disk. If
2639 * page_in_rbio finds a page in the bio list
2640 * we don't need to read it off the stripe.
2642 page = page_in_rbio(rbio, stripe, pagenr, 1);
2646 page = rbio_stripe_page(rbio, stripe, pagenr);
2648 * the bio cache may have handed us an uptodate
2649 * page. If so, be happy and use it
2651 if (PageUptodate(page))
2654 ret = rbio_add_io_page(rbio, &bio_list, page,
2655 stripe, pagenr, rbio->stripe_len);
2661 bios_to_read = bio_list_size(&bio_list);
2662 if (!bios_to_read) {
2664 * this can happen if others have merged with
2665 * us, it means there is nothing left to read.
2666 * But if there are missing devices it may not be
2667 * safe to do the full stripe write yet.
2673 * the bbio may be freed once we submit the last bio. Make sure
2674 * not to touch it after that
2676 atomic_set(&rbio->stripes_pending, bios_to_read);
2678 bio = bio_list_pop(&bio_list);
2682 bio->bi_private = rbio;
2683 bio->bi_end_io = raid56_parity_scrub_end_io;
2684 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2686 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2690 /* the actual write will happen once the reads are done */
2694 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2698 validate_rbio_for_parity_scrub(rbio);
2701 static void scrub_parity_work(struct btrfs_work *work)
2703 struct btrfs_raid_bio *rbio;
2705 rbio = container_of(work, struct btrfs_raid_bio, work);
2706 raid56_parity_scrub_stripe(rbio);
2709 static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2711 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2712 scrub_parity_work, NULL, NULL);
2714 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2717 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2719 if (!lock_stripe_add(rbio))
2720 async_scrub_parity(rbio);
2723 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2725 struct btrfs_raid_bio *
2726 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2727 struct btrfs_bio *bbio, u64 length)
2729 struct btrfs_raid_bio *rbio;
2731 rbio = alloc_rbio(fs_info, bbio, length);
2735 rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2736 bio_list_add(&rbio->bio_list, bio);
2738 * This is a special bio which is used to hold the completion handler
2739 * and make the scrub rbio is similar to the other types
2741 ASSERT(!bio->bi_iter.bi_size);
2743 rbio->faila = find_logical_bio_stripe(rbio, bio);
2744 if (rbio->faila == -1) {
2751 * When we get bbio, we have already increased bio_counter, record it
2752 * so we can free it at rbio_orig_end_io()
2754 rbio->generic_bio_cnt = 1;
2759 static void missing_raid56_work(struct btrfs_work *work)
2761 struct btrfs_raid_bio *rbio;
2763 rbio = container_of(work, struct btrfs_raid_bio, work);
2764 __raid56_parity_recover(rbio);
2767 static void async_missing_raid56(struct btrfs_raid_bio *rbio)
2769 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2770 missing_raid56_work, NULL, NULL);
2772 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2775 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2777 if (!lock_stripe_add(rbio))
2778 async_missing_raid56(rbio);