GNU Linux-libre 4.9.308-gnu1
fs/btrfs/scrub.c
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
#define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */

struct scrub_recover {
        atomic_t                refs;
        struct btrfs_bio        *bbio;
        u64                     map_length;
};

struct scrub_page {
        struct scrub_block      *sblock;
        struct page             *page;
        struct btrfs_device     *dev;
        struct list_head        list;
        u64                     flags;  /* extent flags */
        u64                     generation;
        u64                     logical;
        u64                     physical;
        u64                     physical_for_dev_replace;
        atomic_t                refs;
        struct {
                unsigned int    mirror_num:8;
                unsigned int    have_csum:1;
                unsigned int    io_error:1;
        };
        u8                      csum[BTRFS_CSUM_SIZE];

        struct scrub_recover    *recover;
};

struct scrub_bio {
        int                     index;
        struct scrub_ctx        *sctx;
        struct btrfs_device     *dev;
        struct bio              *bio;
        int                     err;
        u64                     logical;
        u64                     physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
        struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
#else
        struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
        int                     page_count;
        int                     next_free;
        struct btrfs_work       work;
};

struct scrub_block {
        struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
        int                     page_count;
        atomic_t                outstanding_pages;
        atomic_t                refs; /* free mem on transition to zero */
        struct scrub_ctx        *sctx;
        struct scrub_parity     *sparity;
        struct {
                unsigned int    header_error:1;
                unsigned int    checksum_error:1;
                unsigned int    no_io_error_seen:1;
                unsigned int    generation_error:1; /* also sets header_error */

                /*
                 * The following applies to the data used for parity
                 * checking; it is only meaningful for data that has a
                 * checksum.
                 */
                unsigned int    data_corrected:1;
        };
        struct btrfs_work       work;
};

/* Used for the chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
        struct scrub_ctx        *sctx;

        struct btrfs_device     *scrub_dev;

        u64                     logic_start;

        u64                     logic_end;

        int                     nsectors;

        int                     stripe_len;

        atomic_t                refs;

        struct list_head        spages;

        /* Work of parity check and repair */
        struct btrfs_work       work;

        /* Mark the parity blocks which have data */
        unsigned long           *dbitmap;

        /*
         * Mark the parity blocks which have data, but for which errors
         * happened when reading or checking that data
         */
        unsigned long           *ebitmap;

        unsigned long           bitmap[0];
};

struct scrub_wr_ctx {
        struct scrub_bio *wr_curr_bio;
        struct btrfs_device *tgtdev;
        int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
        atomic_t flush_all_writes;
        struct mutex wr_lock;
};

struct scrub_ctx {
        struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
        struct btrfs_root       *dev_root;
        int                     first_free;
        int                     curr;
        atomic_t                bios_in_flight;
        atomic_t                workers_pending;
        spinlock_t              list_lock;
        wait_queue_head_t       list_wait;
        u16                     csum_size;
        struct list_head        csum_list;
        atomic_t                cancel_req;
        int                     readonly;
        int                     pages_per_rd_bio;
        u32                     sectorsize;
        u32                     nodesize;

        int                     is_dev_replace;
        struct scrub_wr_ctx     wr_ctx;

        /*
         * statistics
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;

        /*
         * Use a ref counter to avoid use-after-free issues. Scrub workers
         * decrement bios_in_flight and workers_pending and then do a wakeup
         * on the list_wait wait queue. We must ensure the main scrub task
         * doesn't free the scrub context before or while the workers are
         * doing the wakeup() call.
         */
        atomic_t                refs;
};

struct scrub_fixup_nodatasum {
        struct scrub_ctx        *sctx;
        struct btrfs_device     *dev;
        u64                     logical;
        struct btrfs_root       *root;
        struct btrfs_work       work;
        int                     mirror_num;
};

struct scrub_nocow_inode {
        u64                     inum;
        u64                     offset;
        u64                     root;
        struct list_head        list;
};

struct scrub_copy_nocow_ctx {
        struct scrub_ctx        *sctx;
        u64                     logical;
        u64                     len;
        int                     mirror_num;
        u64                     physical_for_dev_replace;
        struct list_head        inodes;
        struct btrfs_work       work;
};

struct scrub_warning {
        struct btrfs_path       *path;
        u64                     extent_item_size;
        const char              *errstr;
        sector_t                sector;
        u64                     logical;
        struct btrfs_device     *dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
                                     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                                struct scrub_block *sblock,
                                int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
                                             struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                                            struct scrub_block *sblock_good,
                                            int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
                                           int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
                                    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                       u64 physical, struct btrfs_device *dev, u64 flags,
                       u64 gen, int mirror_num, u8 *csum, int force,
                       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
                               u64 extent_logical, u64 extent_len,
                               u64 *extent_physical,
                               struct btrfs_device **extent_dev,
                               int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
                              struct scrub_wr_ctx *wr_ctx,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_device *dev,
                              int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
                                    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
                            u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
                                      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                            int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);


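/*
 * Track scrub bios that are in flight. Each bio holds a reference on the
 * scrub context so that the context cannot be freed while its end_io
 * handling is still outstanding; the last decrement wakes up waiters on
 * sctx->list_wait.
 */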
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
        atomic_inc(&sctx->refs);
        atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
        atomic_dec(&sctx->bios_in_flight);
        wake_up(&sctx->list_wait);
        scrub_put_ctx(sctx);
}

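/*
 * Wait until no scrub pause is requested anymore. Called with scrub_lock
 * held; the lock is dropped while waiting and re-acquired before returning.
 */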
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
        while (atomic_read(&fs_info->scrub_pause_req)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                   atomic_read(&fs_info->scrub_pause_req) == 0);
                mutex_lock(&fs_info->scrub_lock);
        }
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
        atomic_inc(&fs_info->scrubs_paused);
        wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
        mutex_lock(&fs_info->scrub_lock);
        __scrub_blocked_if_needed(fs_info);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);

        wake_up(&fs_info->scrub_pause_wait);
}

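/*
 * Briefly account this scrub as paused and then wait for any pending pause
 * request to be released, so that a transaction commit (or another pause
 * requester) can make progress before scrubbing continues.
 */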
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
        scrub_pause_on(fs_info);
        scrub_pause_off(fs_info);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
        struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

        atomic_inc(&sctx->refs);
        /*
         * increment scrubs_running to prevent cancel requests from
         * completing as long as a worker is running. we must also
         * increment scrubs_paused to prevent deadlocking on pause
         * requests used for transaction commits (as the worker uses a
         * transaction context). it is safe to regard the worker
         * as paused for all practical matters. effectively, we only
         * avoid cancellation requests from completing.
         */
        mutex_lock(&fs_info->scrub_lock);
        atomic_inc(&fs_info->scrubs_running);
        atomic_inc(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);

        /*
         * The check of the @scrubs_running == @scrubs_paused condition
         * inside wait_event() is not an atomic operation, which means we
         * may inc/dec @scrubs_running/@scrubs_paused at any time. Wake up
         * @scrub_pause_wait as often as we can so that a blocked
         * transaction commit is delayed as little as possible.
         */
        wake_up(&fs_info->scrub_pause_wait);

        atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
        struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

        /*
         * see scrub_pending_trans_workers_inc() for why we're pretending
         * to be paused in the scrub counters
         */
        mutex_lock(&fs_info->scrub_lock);
        atomic_dec(&fs_info->scrubs_running);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);
        atomic_dec(&sctx->workers_pending);
        wake_up(&fs_info->scrub_pause_wait);
        wake_up(&sctx->list_wait);
        scrub_put_ctx(sctx);
}

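/* Release all checksums still queued on the per-context csum_list. */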
static void scrub_free_csums(struct scrub_ctx *sctx)
{
        while (!list_empty(&sctx->csum_list)) {
                struct btrfs_ordered_sum *sum;
                sum = list_first_entry(&sctx->csum_list,
                                       struct btrfs_ordered_sum, list);
                list_del(&sum->list);
                kfree(sum);
        }
}

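/*
 * Tear down a scrub context: drop the blocks attached to a partially filled
 * current bio (possible when scrub is cancelled), free the preallocated
 * scrub_bios and the queued checksums, then the context itself.
 */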
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
        int i;

        if (!sctx)
                return;

        scrub_free_wr_ctx(&sctx->wr_ctx);

        /* this can happen when scrub is cancelled */
        if (sctx->curr != -1) {
                struct scrub_bio *sbio = sctx->bios[sctx->curr];

                for (i = 0; i < sbio->page_count; i++) {
                        WARN_ON(!sbio->pagev[i]->page);
                        scrub_block_put(sbio->pagev[i]->sblock);
                }
                bio_put(sbio->bio);
        }

        for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
                struct scrub_bio *sbio = sctx->bios[i];

                if (!sbio)
                        break;
                kfree(sbio);
        }

        scrub_free_csums(sctx);
        kfree(sctx);
}

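/* Drop a reference on the scrub context and free it on the last put. */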
static void scrub_put_ctx(struct scrub_ctx *sctx)
{
        if (atomic_dec_and_test(&sctx->refs))
                scrub_free_ctx(sctx);
}

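/*
 * Allocate and initialize a scrub context for @dev: preallocate the pool of
 * scrub_bios and link them into a free list, record the node, sector and
 * checksum sizes, and set up the write context used when scrubbing as part
 * of a device replace operation.
 */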
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
        struct scrub_ctx *sctx;
        int             i;
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        int ret;

        sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
        if (!sctx)
                goto nomem;
        atomic_set(&sctx->refs, 1);
        sctx->is_dev_replace = is_dev_replace;
        sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
        sctx->curr = -1;
        sctx->dev_root = dev->dev_root;
        for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
                struct scrub_bio *sbio;

                sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
                if (!sbio)
                        goto nomem;
                sctx->bios[i] = sbio;

                sbio->index = i;
                sbio->sctx = sctx;
                sbio->page_count = 0;
                btrfs_init_work(&sbio->work, btrfs_scrub_helper,
                                scrub_bio_end_io_worker, NULL, NULL);

                if (i != SCRUB_BIOS_PER_SCTX - 1)
                        sctx->bios[i]->next_free = i + 1;
                else
                        sctx->bios[i]->next_free = -1;
        }
        sctx->first_free = 0;
        sctx->nodesize = dev->dev_root->nodesize;
        sctx->sectorsize = dev->dev_root->sectorsize;
        atomic_set(&sctx->bios_in_flight, 0);
        atomic_set(&sctx->workers_pending, 0);
        atomic_set(&sctx->cancel_req, 0);
        sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
        INIT_LIST_HEAD(&sctx->csum_list);

        spin_lock_init(&sctx->list_lock);
        spin_lock_init(&sctx->stat_lock);
        init_waitqueue_head(&sctx->list_wait);

        ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
                                 fs_info->dev_replace.tgtdev, is_dev_replace);
        if (ret) {
                scrub_free_ctx(sctx);
                return ERR_PTR(ret);
        }
        return sctx;

nomem:
        scrub_free_ctx(sctx);
        return ERR_PTR(-ENOMEM);
}

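/*
 * Callback for iterate_extent_inodes(): resolve all file paths of one
 * (inode, offset, root) reference to the errored extent and print a warning
 * line for each resolved path.
 */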
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
                                     void *warn_ctx)
{
        u64 isize;
        u32 nlink;
        int ret;
        int i;
        struct extent_buffer *eb;
        struct btrfs_inode_item *inode_item;
        struct scrub_warning *swarn = warn_ctx;
        struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_root *local_root;
        struct btrfs_key root_key;
        struct btrfs_key key;

        root_key.objectid = root;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        root_key.offset = (u64)-1;
        local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
        if (IS_ERR(local_root)) {
                ret = PTR_ERR(local_root);
                goto err;
        }

        /*
         * this makes the path point to (inum INODE_ITEM ioff)
         */
        key.objectid = inum;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
        if (ret) {
                btrfs_release_path(swarn->path);
                goto err;
        }

        eb = swarn->path->nodes[0];
        inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
                                        struct btrfs_inode_item);
        isize = btrfs_inode_size(eb, inode_item);
        nlink = btrfs_inode_nlink(eb, inode_item);
        btrfs_release_path(swarn->path);

        ipath = init_ipath(4096, local_root, swarn->path);
        if (IS_ERR(ipath)) {
                ret = PTR_ERR(ipath);
                ipath = NULL;
                goto err;
        }
        ret = paths_from_inode(inum, ipath);

        if (ret < 0)
                goto err;

        /*
         * we deliberately ignore the fact that ipath might have been too
         * small to hold all of the paths here
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
                btrfs_warn_in_rcu(fs_info,
                                  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
                                  swarn->errstr, swarn->logical,
                                  rcu_str_deref(swarn->dev->name),
                                  (unsigned long long)swarn->sector,
                                  root, inum, offset,
                                  min(isize - offset, (u64)PAGE_SIZE), nlink,
                                  (char *)(unsigned long)ipath->fspath->val[i]);

        free_ipath(ipath);
        return 0;

err:
        btrfs_warn_in_rcu(fs_info,
                          "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
                          swarn->errstr, swarn->logical,
                          rcu_str_deref(swarn->dev->name),
                          (unsigned long long)swarn->sector,
                          root, inum, offset, ret);

        free_ipath(ipath);
        return 0;
}

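/*
 * Report what is affected by an error at the given logical address: for
 * tree blocks, walk the backrefs and print the owning tree and level; for
 * data, iterate the referencing inodes and print their file paths.
 */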
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
        struct btrfs_device *dev;
        struct btrfs_fs_info *fs_info;
        struct btrfs_path *path;
        struct btrfs_key found_key;
        struct extent_buffer *eb;
        struct btrfs_extent_item *ei;
        struct scrub_warning swarn;
        unsigned long ptr = 0;
        u64 extent_item_pos;
        u64 flags = 0;
        u64 ref_root;
        u32 item_size;
        u8 ref_level = 0;
        int ret;

        WARN_ON(sblock->page_count < 1);
        dev = sblock->pagev[0]->dev;
        fs_info = sblock->sctx->dev_root->fs_info;

        path = btrfs_alloc_path();
        if (!path)
                return;

        swarn.sector = (sblock->pagev[0]->physical) >> 9;
        swarn.logical = sblock->pagev[0]->logical;
        swarn.errstr = errstr;
        swarn.dev = NULL;

        ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
                                  &flags);
        if (ret < 0)
                goto out;

        extent_item_pos = swarn.logical - found_key.objectid;
        swarn.extent_item_size = found_key.offset;

        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size_nr(eb, path->slots[0]);

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
                        ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
                                                      item_size, &ref_root,
                                                      &ref_level);
                        btrfs_warn_in_rcu(fs_info,
                                "%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
                                errstr, swarn.logical,
                                rcu_str_deref(dev->name),
                                (unsigned long long)swarn.sector,
                                ref_level ? "node" : "leaf",
                                ret < 0 ? -1 : ref_level,
                                ret < 0 ? -1 : ref_root);
                } while (ret != 1);
                btrfs_release_path(path);
        } else {
                btrfs_release_path(path);
                swarn.path = path;
                swarn.dev = dev;
                iterate_extent_inodes(fs_info, found_key.objectid,
                                        extent_item_pos, 1,
                                        scrub_print_warning_inode, &swarn);
        }

out:
        btrfs_free_path(path);
}

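/*
 * Callback used by scrub_fixup_nodatasum() for each inode referencing the
 * bad sector: if the page is already uptodate, rewrite the sector via
 * repair_io_failure(); otherwise force a read of the failed mirror so that
 * the generic read-repair path fixes the sector on disk.
 */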
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
        struct page *page = NULL;
        unsigned long index;
        struct scrub_fixup_nodatasum *fixup = fixup_ctx;
        int ret;
        int corrected = 0;
        struct btrfs_key key;
        struct inode *inode = NULL;
        struct btrfs_fs_info *fs_info;
        u64 end = offset + PAGE_SIZE - 1;
        struct btrfs_root *local_root;
        int srcu_index;

        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        fs_info = fixup->root->fs_info;
        srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

        local_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(local_root)) {
                srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return PTR_ERR(local_root);
        }

        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
        srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        index = offset >> PAGE_SHIFT;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                goto out;
        }

        if (PageUptodate(page)) {
                if (PageDirty(page)) {
                        /*
                         * we need to write the data to the defective sector.
                         * the data that was in that sector is not in memory,
                         * because the page was modified. we must not write the
                         * modified page to that sector.
                         *
                         * TODO: what could be done here: wait for the delalloc
                         *       runner to write out that page (might involve
                         *       COW) and see whether the sector is still
                         *       referenced afterwards.
                         *
                         * For the meantime, we'll treat this error as
                         * uncorrectable, although there is a chance that a
                         * later scrub will find the bad sector again and that
                         * there's no dirty page in memory then.
                         */
                        ret = -EIO;
                        goto out;
                }
                ret = repair_io_failure(inode, offset, PAGE_SIZE,
                                        fixup->logical, page,
                                        offset - page_offset(page),
                                        fixup->mirror_num);
                unlock_page(page);
                corrected = !ret;
        } else {
                /*
                 * we need to get good data first. the general readpage path
                 * will call repair_io_failure for us, we just have to make
                 * sure we read the bad mirror.
                 */
                ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                        EXTENT_DAMAGED);
                if (ret) {
                        /* set_extent_bits should give proper error */
                        WARN_ON(ret > 0);
                        if (ret > 0)
                                ret = -EFAULT;
                        goto out;
                }

                ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
                                                btrfs_get_extent,
                                                fixup->mirror_num);
                wait_on_page_locked(page);

                corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
                                                end, EXTENT_DAMAGED, 0, NULL);
                if (!corrected)
                        clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                                EXTENT_DAMAGED);
        }

out:
        if (page)
                put_page(page);

        iput(inode);

        if (ret < 0)
                return ret;

        if (ret == 0 && corrected) {
                /*
                 * we only need to call readpage for one of the inodes belonging
                 * to this extent. so make iterate_extent_inodes stop
                 */
                return 1;
        }

        return -EIO;
}

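/*
 * Worker for repairing data extents without checksum: join a transaction
 * and, for every inode that references the extent, trigger a page cache
 * read of the failed copy so that the regular read-repair code rewrites
 * the bad sector. Statistics are updated according to the outcome.
 */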
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
        int ret;
        struct scrub_fixup_nodatasum *fixup;
        struct scrub_ctx *sctx;
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_path *path;
        int uncorrectable = 0;

        fixup = container_of(work, struct scrub_fixup_nodatasum, work);
        sctx = fixup->sctx;

        path = btrfs_alloc_path();
        if (!path) {
                spin_lock(&sctx->stat_lock);
                ++sctx->stat.malloc_errors;
                spin_unlock(&sctx->stat_lock);
                uncorrectable = 1;
                goto out;
        }

        trans = btrfs_join_transaction(fixup->root);
        if (IS_ERR(trans)) {
                uncorrectable = 1;
                goto out;
        }

        /*
         * the idea is to trigger a regular read through the standard path. we
         * read a page from the (failed) logical address by specifying the
         * corresponding copynum of the failed sector. thus, that readpage is
         * expected to fail.
         * that is the point where on-the-fly error correction will kick in
         * (once it's finished) and rewrite the failed sector if a good copy
         * can be found.
         */
        ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
                                                path, scrub_fixup_readpage,
                                                fixup);
        if (ret < 0) {
                uncorrectable = 1;
                goto out;
        }
        WARN_ON(ret != 1);

        spin_lock(&sctx->stat_lock);
        ++sctx->stat.corrected_errors;
        spin_unlock(&sctx->stat_lock);

out:
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans, fixup->root);
        if (uncorrectable) {
                spin_lock(&sctx->stat_lock);
                ++sctx->stat.uncorrectable_errors;
                spin_unlock(&sctx->stat_lock);
                btrfs_dev_replace_stats_inc(
                        &sctx->dev_root->fs_info->dev_replace.
                        num_uncorrectable_read_errors);
                btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
                    "unable to fixup (nodatasum) error at logical %llu on dev %s",
                        fixup->logical, rcu_str_deref(fixup->dev->name));
        }

        btrfs_free_path(path);
        kfree(fixup);

        scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
        atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
        if (atomic_dec_and_test(&recover->refs)) {
                btrfs_put_bbio(recover->bbio);
                kfree(recover);
        }
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
        struct scrub_ctx *sctx = sblock_to_check->sctx;
        struct btrfs_device *dev;
        struct btrfs_fs_info *fs_info;
        u64 length;
        u64 logical;
        unsigned int failed_mirror_index;
        unsigned int is_metadata;
        unsigned int have_csum;
        struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
        struct scrub_block *sblock_bad;
        int ret;
        int mirror_index;
        int page_num;
        int success;
        static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        BUG_ON(sblock_to_check->page_count < 1);
        fs_info = sctx->dev_root->fs_info;
        if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
                /*
                 * if we find an error in a super block, we just report it.
                 * Super blocks get rewritten with the next transaction
                 * commit anyway
                 */
                spin_lock(&sctx->stat_lock);
                ++sctx->stat.super_errors;
                spin_unlock(&sctx->stat_lock);
                return 0;
        }
        length = sblock_to_check->page_count * PAGE_SIZE;
        logical = sblock_to_check->pagev[0]->logical;
        BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
        failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
        is_metadata = !(sblock_to_check->pagev[0]->flags &
                        BTRFS_EXTENT_FLAG_DATA);
        have_csum = sblock_to_check->pagev[0]->have_csum;
        dev = sblock_to_check->pagev[0]->dev;

        /*
         * read all mirrors one after the other. This includes re-reading
         * the extent or metadata block that failed (which is what caused
         * this fixup code to be called) another time,
         * page by page this time in order to know which pages
         * caused I/O errors and which ones are good (for all mirrors).
         * The goal is to handle the situation when more than one
         * mirror contains I/O errors, but the errors do not
         * overlap, i.e. the data can be repaired by selecting the
         * pages from those mirrors without I/O error on the
         * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
         * would be that mirror #1 has an I/O error on the first page,
         * the second page is good, and mirror #2 has an I/O error on
         * the second page, but the first page is good.
         * Then the first page of the first mirror can be repaired by
         * taking the first page of the second mirror, and the
         * second page of the second mirror can be repaired by
         * copying the contents of the 2nd page of the 1st mirror.
         * One more note: if the pages of one mirror contain I/O
         * errors, the checksum cannot be verified. In order to get
         * the best data for repairing, the first attempt is to find
         * a mirror without I/O errors and with a validated checksum.
         * Only if this is not possible are the pages picked from
         * mirrors with I/O errors without considering the checksum.
         * If the latter is the case, at the end, the checksum of the
         * repaired area is verified in order to correctly maintain
         * the statistics.
         */

        sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
                                      sizeof(*sblocks_for_recheck), GFP_NOFS);
        if (!sblocks_for_recheck) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
                sctx->stat.read_errors++;
                sctx->stat.uncorrectable_errors++;
                spin_unlock(&sctx->stat_lock);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }

        /* setup the context, map the logical blocks and alloc the pages */
        ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
        if (ret) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.read_errors++;
                sctx->stat.uncorrectable_errors++;
                spin_unlock(&sctx->stat_lock);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }
        BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
        sblock_bad = sblocks_for_recheck + failed_mirror_index;

        /* build and submit the bios for the failed mirror, check checksums */
        scrub_recheck_block(fs_info, sblock_bad, 1);

        if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
            sblock_bad->no_io_error_seen) {
                /*
                 * the error disappeared after reading page by page, or
                 * the area was part of a huge bio and other parts of the
                 * bio caused I/O errors, or the block layer merged several
                 * read requests into one and the error is caused by a
                 * different bio (usually one of the two latter cases is
                 * the cause)
                 */
                spin_lock(&sctx->stat_lock);
                sctx->stat.unverified_errors++;
                sblock_to_check->data_corrected = 1;
                spin_unlock(&sctx->stat_lock);

                if (sctx->is_dev_replace)
                        scrub_write_block_to_dev_replace(sblock_bad);
                goto out;
        }

        if (!sblock_bad->no_io_error_seen) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.read_errors++;
                spin_unlock(&sctx->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("i/o error", sblock_to_check);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
        } else if (sblock_bad->checksum_error) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.csum_errors++;
                spin_unlock(&sctx->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum error", sblock_to_check);
                btrfs_dev_stat_inc_and_print(dev,
                                             BTRFS_DEV_STAT_CORRUPTION_ERRS);
        } else if (sblock_bad->header_error) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.verify_errors++;
                spin_unlock(&sctx->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum/header error",
                                            sblock_to_check);
                if (sblock_bad->generation_error)
                        btrfs_dev_stat_inc_and_print(dev,
                                BTRFS_DEV_STAT_GENERATION_ERRS);
                else
                        btrfs_dev_stat_inc_and_print(dev,
                                BTRFS_DEV_STAT_CORRUPTION_ERRS);
        }

        if (sctx->readonly) {
                ASSERT(!sctx->is_dev_replace);
                goto out;
        }

        /*
         * NOTE: Even for the nodatasum case, it's still possible that this is
         * a compressed data extent, so scrub_fixup_nodatasum(), which writes
         * the inode page cache onto disk, could cause serious data corruption.
         *
         * So here we can only read from disk, and hope our recovery reaches
         * the disk before the newer write.
         */
        if (0 && !is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;

                WARN_ON(sctx->is_dev_replace);

                /*
                 * !is_metadata and !have_csum, this means that the data
                 * might not be COWed, that it might be modified
                 * concurrently. The general strategy to work on the
                 * commit root does not help in the case when COW is not
                 * used.
                 */
                fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
                if (!fixup_nodatasum)
                        goto did_not_correct_error;
                fixup_nodatasum->sctx = sctx;
                fixup_nodatasum->dev = dev;
                fixup_nodatasum->logical = logical;
                fixup_nodatasum->root = fs_info->extent_root;
                fixup_nodatasum->mirror_num = failed_mirror_index + 1;
                scrub_pending_trans_workers_inc(sctx);
                btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
                                scrub_fixup_nodatasum, NULL, NULL);
                btrfs_queue_work(fs_info->scrub_workers,
                                 &fixup_nodatasum->work);
                goto out;
        }

        /*
         * now build and submit the bios for the other mirrors, check
         * checksums.
         * First try to pick the mirror which is completely without I/O
         * errors and also does not have a checksum error.
         * If one is found, and if a checksum is present, the full block
         * that is known to contain an error is rewritten. Afterwards
         * the block is known to be corrected.
         * If a mirror is found which is completely correct, and no
         * checksum is present, only those pages are rewritten that had
         * an I/O error in the block to be repaired, since it cannot be
         * determined, which copy of the other pages is better (and it
         * could happen otherwise that a correct page would be
         * overwritten by a bad one).
         */
        for (mirror_index = 0;
             mirror_index < BTRFS_MAX_MIRRORS &&
             sblocks_for_recheck[mirror_index].page_count > 0;
             mirror_index++) {
                struct scrub_block *sblock_other;

                if (mirror_index == failed_mirror_index)
                        continue;
                sblock_other = sblocks_for_recheck + mirror_index;

                /* build and submit the bios, check checksums */
                scrub_recheck_block(fs_info, sblock_other, 0);

                if (!sblock_other->header_error &&
                    !sblock_other->checksum_error &&
                    sblock_other->no_io_error_seen) {
                        if (sctx->is_dev_replace) {
                                scrub_write_block_to_dev_replace(sblock_other);
                                goto corrected_error;
                        } else {
                                ret = scrub_repair_block_from_good_copy(
                                                sblock_bad, sblock_other);
                                if (!ret)
                                        goto corrected_error;
                        }
                }
        }

        if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
                goto did_not_correct_error;

        /*
         * In case of I/O errors in the area that is supposed to be
         * repaired, continue by picking good copies of those pages.
         * Select the good pages from mirrors to rewrite bad pages from
         * the area to fix. Afterwards verify the checksum of the block
         * that is supposed to be repaired. This verification step is
         * only done for the purpose of statistic counting and for the
         * final scrub report, whether errors remain.
         * A perfect algorithm could make use of the checksum and try
         * all possible combinations of pages from the different mirrors
         * until the checksum verification succeeds. For example, when
         * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
         * of mirror #2 is readable but the final checksum test fails,
         * then the 2nd page of mirror #3 could be tried, to see whether
         * the final checksum succeeds now. But this would be a rare
         * exception and is therefore not implemented. At least it is
         * avoided that the good copy is overwritten.
         * A more useful improvement would be to pick the sectors
         * without I/O error based on sector sizes (512 bytes on legacy
         * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
         * mirror could be repaired by taking 512 bytes of a different
         * mirror, even if other 512 byte sectors in the same PAGE_SIZE
         * area are unreadable.
         */
        success = 1;
        for (page_num = 0; page_num < sblock_bad->page_count;
             page_num++) {
                struct scrub_page *page_bad = sblock_bad->pagev[page_num];
                struct scrub_block *sblock_other = NULL;

                /* skip no-io-error page in scrub */
                if (!page_bad->io_error && !sctx->is_dev_replace)
                        continue;

                /* try to find no-io-error page in mirrors */
                if (page_bad->io_error) {
                        for (mirror_index = 0;
                             mirror_index < BTRFS_MAX_MIRRORS &&
                             sblocks_for_recheck[mirror_index].page_count > 0;
                             mirror_index++) {
                                if (!sblocks_for_recheck[mirror_index].
                                    pagev[page_num]->io_error) {
                                        sblock_other = sblocks_for_recheck +
                                                       mirror_index;
                                        break;
                                }
                        }
                        if (!sblock_other)
                                success = 0;
                }

                if (sctx->is_dev_replace) {
                        /*
                         * did not find a mirror to fetch the page
                         * from. scrub_write_page_to_dev_replace()
                         * handles this case (page->io_error), by
                         * filling the block with zeros before
                         * submitting the write request
                         */
                        if (!sblock_other)
                                sblock_other = sblock_bad;

                        if (scrub_write_page_to_dev_replace(sblock_other,
                                                            page_num) != 0) {
                                btrfs_dev_replace_stats_inc(
                                        &sctx->dev_root->
                                        fs_info->dev_replace.
                                        num_write_errors);
                                success = 0;
                        }
                } else if (sblock_other) {
                        ret = scrub_repair_page_from_good_copy(sblock_bad,
                                                               sblock_other,
                                                               page_num, 0);
                        if (0 == ret)
                                page_bad->io_error = 0;
                        else
                                success = 0;
                }
        }

        if (success && !sctx->is_dev_replace) {
                if (is_metadata || have_csum) {
                        /*
                         * need to verify the checksum now that all
                         * sectors on disk are repaired (the write
                         * request for data to be repaired is on its way).
                         * Just be lazy and use scrub_recheck_block()
                         * which re-reads the data before the checksum
                         * is verified, but most likely the data comes out
                         * of the page cache.
                         */
                        scrub_recheck_block(fs_info, sblock_bad, 1);
                        if (!sblock_bad->header_error &&
                            !sblock_bad->checksum_error &&
                            sblock_bad->no_io_error_seen)
                                goto corrected_error;
                        else
                                goto did_not_correct_error;
                } else {
corrected_error:
                        spin_lock(&sctx->stat_lock);
                        sctx->stat.corrected_errors++;
                        sblock_to_check->data_corrected = 1;
                        spin_unlock(&sctx->stat_lock);
                        btrfs_err_rl_in_rcu(fs_info,
                                "fixed up error at logical %llu on dev %s",
                                logical, rcu_str_deref(dev->name));
                }
        } else {
did_not_correct_error:
                spin_lock(&sctx->stat_lock);
                sctx->stat.uncorrectable_errors++;
                spin_unlock(&sctx->stat_lock);
                btrfs_err_rl_in_rcu(fs_info,
                        "unable to fixup (regular) error at logical %llu on dev %s",
                        logical, rcu_str_deref(dev->name));
        }

out:
        if (sblocks_for_recheck) {
                for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
                     mirror_index++) {
                        struct scrub_block *sblock = sblocks_for_recheck +
                                                     mirror_index;
                        struct scrub_recover *recover;
                        int page_index;

                        for (page_index = 0; page_index < sblock->page_count;
                             page_index++) {
                                sblock->pagev[page_index]->sblock = NULL;
                                recover = sblock->pagev[page_index]->recover;
                                if (recover) {
                                        scrub_put_recover(recover);
                                        sblock->pagev[page_index]->recover =
                                                                        NULL;
                                }
                                scrub_page_put(sblock->pagev[page_index]);
                        }
                }
                kfree(sblocks_for_recheck);
        }

        return 0;
}

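/*
 * Number of copies that can be read back for a block: 2 for RAID5 (data
 * plus parity reconstruction), 3 for RAID6, otherwise the number of
 * stripes returned in the btrfs_bio.
 */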
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
        if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
                return 2;
        else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
                return 3;
        else
                return (int)bbio->num_stripes;
}

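/*
 * Map a logical address to the stripe holding it. For RAID5/6 the stripe is
 * found by searching the raid_map (skipping the P/Q stripes); for all other
 * profiles the mirror number selects the stripe directly.
 */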
1272 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1273                                                  u64 *raid_map,
1274                                                  u64 mapped_length,
1275                                                  int nstripes, int mirror,
1276                                                  int *stripe_index,
1277                                                  u64 *stripe_offset)
1278 {
1279         int i;
1280
1281         if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1282                 /* RAID5/6 */
1283                 for (i = 0; i < nstripes; i++) {
1284                         if (raid_map[i] == RAID6_Q_STRIPE ||
1285                             raid_map[i] == RAID5_P_STRIPE)
1286                                 continue;
1287
1288                         if (logical >= raid_map[i] &&
1289                             logical < raid_map[i] + mapped_length)
1290                                 break;
1291                 }
1292
1293                 *stripe_index = i;
1294                 *stripe_offset = logical - raid_map[i];
1295         } else {
1296                 /* Other RAID types: the mirror number is the stripe index */
1297                 *stripe_index = mirror;
1298                 *stripe_offset = 0;
1299         }
1300 }
1301
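/*
 * Build one scrub_block per mirror for the logical range covered by the
 * original block: each PAGE_SIZE chunk is mapped with REQ_GET_READ_MIRRORS
 * and a page is added to the block of every mirror, so that all copies can
 * be re-read and compared later.
 */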
1302 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1303                                      struct scrub_block *sblocks_for_recheck)
1304 {
1305         struct scrub_ctx *sctx = original_sblock->sctx;
1306         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
1307         u64 length = original_sblock->page_count * PAGE_SIZE;
1308         u64 logical = original_sblock->pagev[0]->logical;
1309         u64 generation = original_sblock->pagev[0]->generation;
1310         u64 flags = original_sblock->pagev[0]->flags;
1311         u64 have_csum = original_sblock->pagev[0]->have_csum;
1312         struct scrub_recover *recover;
1313         struct btrfs_bio *bbio;
1314         u64 sublen;
1315         u64 mapped_length;
1316         u64 stripe_offset;
1317         int stripe_index;
1318         int page_index = 0;
1319         int mirror_index;
1320         int nmirrors;
1321         int ret;
1322
1323         /*
1324          * note: the two members refs and outstanding_pages
1325          * are not used (and not set) in the blocks that are used for
1326          * the recheck procedure
1327          */
1328
1329         while (length > 0) {
1330                 sublen = min_t(u64, length, PAGE_SIZE);
1331                 mapped_length = sublen;
1332                 bbio = NULL;
1333
1334                 /*
1335                  * with a length of PAGE_SIZE, each returned stripe
1336                  * represents one mirror
1337                  */
1338                 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1339                                        &mapped_length, &bbio, 0, 1);
1340                 if (ret || !bbio || mapped_length < sublen) {
1341                         btrfs_put_bbio(bbio);
1342                         return -EIO;
1343                 }
1344
1345                 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1346                 if (!recover) {
1347                         btrfs_put_bbio(bbio);
1348                         return -ENOMEM;
1349                 }
1350
1351                 atomic_set(&recover->refs, 1);
1352                 recover->bbio = bbio;
1353                 recover->map_length = mapped_length;
1354
1355                 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1356
1357                 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1358
1359                 for (mirror_index = 0; mirror_index < nmirrors;
1360                      mirror_index++) {
1361                         struct scrub_block *sblock;
1362                         struct scrub_page *page;
1363
1364                         sblock = sblocks_for_recheck + mirror_index;
1365                         sblock->sctx = sctx;
1366
1367                         page = kzalloc(sizeof(*page), GFP_NOFS);
1368                         if (!page) {
1369 leave_nomem:
1370                                 spin_lock(&sctx->stat_lock);
1371                                 sctx->stat.malloc_errors++;
1372                                 spin_unlock(&sctx->stat_lock);
1373                                 scrub_put_recover(recover);
1374                                 return -ENOMEM;
1375                         }
1376                         scrub_page_get(page);
1377                         sblock->pagev[page_index] = page;
1378                         page->sblock = sblock;
1379                         page->flags = flags;
1380                         page->generation = generation;
1381                         page->logical = logical;
1382                         page->have_csum = have_csum;
1383                         if (have_csum)
1384                                 memcpy(page->csum,
1385                                        original_sblock->pagev[0]->csum,
1386                                        sctx->csum_size);
1387
1388                         scrub_stripe_index_and_offset(logical,
1389                                                       bbio->map_type,
1390                                                       bbio->raid_map,
1391                                                       mapped_length,
1392                                                       bbio->num_stripes -
1393                                                       bbio->num_tgtdevs,
1394                                                       mirror_index,
1395                                                       &stripe_index,
1396                                                       &stripe_offset);
1397                         page->physical = bbio->stripes[stripe_index].physical +
1398                                          stripe_offset;
1399                         page->dev = bbio->stripes[stripe_index].dev;
1400
1401                         BUG_ON(page_index >= original_sblock->page_count);
1402                         page->physical_for_dev_replace =
1403                                 original_sblock->pagev[page_index]->
1404                                 physical_for_dev_replace;
1405                         /* for missing devices, dev->bdev is NULL */
1406                         page->mirror_num = mirror_index + 1;
1407                         sblock->page_count++;
1408                         page->page = alloc_page(GFP_NOFS);
1409                         if (!page->page)
1410                                 goto leave_nomem;
1411
1412                         scrub_get_recover(recover);
1413                         page->recover = recover;
1414                 }
1415                 scrub_put_recover(recover);
1416                 length -= sublen;
1417                 logical += sublen;
1418                 page_index++;
1419         }
1420
1421         return 0;
1422 }
1423
1424 struct scrub_bio_ret {
1425         struct completion event;
1426         int error;
1427 };
1428
1429 static void scrub_bio_wait_endio(struct bio *bio)
1430 {
1431         struct scrub_bio_ret *ret = bio->bi_private;
1432
1433         ret->error = bio->bi_error;
1434         complete(&ret->event);
1435 }
1436
1437 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1438 {
1439         return page->recover &&
1440                (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
1441 }
1442
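/*
 * Submit a read for one page through the RAID5/6 recovery path and wait
 * for it to complete. Returns 0 on success and a negative errno (-EIO on
 * a failed rebuild) otherwise.
 */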
1443 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1444                                         struct bio *bio,
1445                                         struct scrub_page *page)
1446 {
1447         struct scrub_bio_ret done;
1448         int ret;
1449
1450         init_completion(&done.event);
1451         done.error = 0;
1452         bio->bi_iter.bi_sector = page->logical >> 9;
1453         bio->bi_private = &done;
1454         bio->bi_end_io = scrub_bio_wait_endio;
1455
1456         ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1457                                     page->recover->map_length,
1458                                     page->mirror_num, 0);
1459         if (ret)
1460                 return ret;
1461
1462         wait_for_completion(&done.event);
1463         if (done.error)
1464                 return -EIO;
1465
1466         return 0;
1467 }
1468
1469 /*
1470  * This function checks the on-disk data for checksum errors, header
1471  * errors and read I/O errors. If any I/O error happens, the exact pages
1472  * that failed are marked as bad. The goal is to enable scrub to take
1473  * the pages that are not errored from all the mirrors, so that the
1474  * pages that are errored in the just handled mirror can be repaired.
1475  */
1476 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1477                                 struct scrub_block *sblock,
1478                                 int retry_failed_mirror)
1479 {
1480         int page_num;
1481
1482         sblock->no_io_error_seen = 1;
1483
1484         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1485                 struct bio *bio;
1486                 struct scrub_page *page = sblock->pagev[page_num];
1487
1488                 if (page->dev->bdev == NULL) {
1489                         page->io_error = 1;
1490                         sblock->no_io_error_seen = 0;
1491                         continue;
1492                 }
1493
1494                 WARN_ON(!page->page);
1495                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1496                 if (!bio) {
1497                         page->io_error = 1;
1498                         sblock->no_io_error_seen = 0;
1499                         continue;
1500                 }
1501                 bio->bi_bdev = page->dev->bdev;
1502
1503                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1504                 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1505                         if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1506                                 sblock->no_io_error_seen = 0;
1507                 } else {
1508                         bio->bi_iter.bi_sector = page->physical >> 9;
1509                         bio_set_op_attrs(bio, REQ_OP_READ, 0);
1510
1511                         if (btrfsic_submit_bio_wait(bio))
1512                                 sblock->no_io_error_seen = 0;
1513                 }
1514
1515                 bio_put(bio);
1516         }
1517
1518         if (sblock->no_io_error_seen)
1519                 scrub_recheck_block_checksum(sblock);
1520 }
1521
1522 static inline int scrub_check_fsid(u8 fsid[],
1523                                    struct scrub_page *spage)
1524 {
1525         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1526         int ret;
1527
1528         ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1529         return !ret;
1530 }
1531
1532 static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1533 {
1534         sblock->header_error = 0;
1535         sblock->checksum_error = 0;
1536         sblock->generation_error = 0;
1537
1538         if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1539                 scrub_checksum_data(sblock);
1540         else
1541                 scrub_checksum_tree_block(sblock);
1542 }
1543
1544 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1545                                              struct scrub_block *sblock_good)
1546 {
1547         int page_num;
1548         int ret = 0;
1549
1550         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1551                 int ret_sub;
1552
1553                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1554                                                            sblock_good,
1555                                                            page_num, 1);
1556                 if (ret_sub)
1557                         ret = ret_sub;
1558         }
1559
1560         return ret;
1561 }
1562
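/*
 * Rewrite one page of the bad mirror with the data of the corresponding
 * page of the good mirror. With force_write set the page is written
 * unconditionally, otherwise only if the bad block has header or checksum
 * errors or the page itself saw an I/O error.
 */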
1563 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1564                                             struct scrub_block *sblock_good,
1565                                             int page_num, int force_write)
1566 {
1567         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1568         struct scrub_page *page_good = sblock_good->pagev[page_num];
1569
1570         BUG_ON(page_bad->page == NULL);
1571         BUG_ON(page_good->page == NULL);
1572         if (force_write || sblock_bad->header_error ||
1573             sblock_bad->checksum_error || page_bad->io_error) {
1574                 struct bio *bio;
1575                 int ret;
1576
1577                 if (!page_bad->dev->bdev) {
1578                         btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
1579                                 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1580                         return -EIO;
1581                 }
1582
1583                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1584                 if (!bio)
1585                         return -EIO;
1586                 bio->bi_bdev = page_bad->dev->bdev;
1587                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1588                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1589
1590                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1591                 if (PAGE_SIZE != ret) {
1592                         bio_put(bio);
1593                         return -EIO;
1594                 }
1595
1596                 if (btrfsic_submit_bio_wait(bio)) {
1597                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1598                                 BTRFS_DEV_STAT_WRITE_ERRS);
1599                         btrfs_dev_replace_stats_inc(
1600                                 &sblock_bad->sctx->dev_root->fs_info->
1601                                 dev_replace.num_write_errors);
1602                         bio_put(bio);
1603                         return -EIO;
1604                 }
1605                 bio_put(bio);
1606         }
1607
1608         return 0;
1609 }
1610
1611 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1612 {
1613         int page_num;
1614
1615         /*
1616          * This block is used to check the parity on the source device,
1617          * so its data need not be written to the destination device.
1618          */
1619         if (sblock->sparity)
1620                 return;
1621
1622         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1623                 int ret;
1624
1625                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1626                 if (ret)
1627                         btrfs_dev_replace_stats_inc(
1628                                 &sblock->sctx->dev_root->fs_info->dev_replace.
1629                                 num_write_errors);
1630         }
1631 }
1632
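/*
 * Queue one page for writing to the dev-replace target. Pages that could
 * not be read are zeroed first so that no garbage is copied to the new
 * device.
 */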
1633 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1634                                            int page_num)
1635 {
1636         struct scrub_page *spage = sblock->pagev[page_num];
1637
1638         BUG_ON(spage->page == NULL);
1639         if (spage->io_error) {
1640                 void *mapped_buffer = kmap_atomic(spage->page);
1641
1642                 memset(mapped_buffer, 0, PAGE_SIZE);
1643                 flush_dcache_page(spage->page);
1644                 kunmap_atomic(mapped_buffer);
1645         }
1646         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1647 }
1648
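/*
 * Add a page to the current dev-replace write bio, allocating a new bio if
 * needed. The bio is submitted as soon as the next page is not physically
 * or logically contiguous with it, or when it is full.
 */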
1649 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1650                                     struct scrub_page *spage)
1651 {
1652         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1653         struct scrub_bio *sbio;
1654         int ret;
1655
1656         mutex_lock(&wr_ctx->wr_lock);
1657 again:
1658         if (!wr_ctx->wr_curr_bio) {
1659                 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1660                                               GFP_KERNEL);
1661                 if (!wr_ctx->wr_curr_bio) {
1662                         mutex_unlock(&wr_ctx->wr_lock);
1663                         return -ENOMEM;
1664                 }
1665                 wr_ctx->wr_curr_bio->sctx = sctx;
1666                 wr_ctx->wr_curr_bio->page_count = 0;
1667         }
1668         sbio = wr_ctx->wr_curr_bio;
1669         if (sbio->page_count == 0) {
1670                 struct bio *bio;
1671
1672                 sbio->physical = spage->physical_for_dev_replace;
1673                 sbio->logical = spage->logical;
1674                 sbio->dev = wr_ctx->tgtdev;
1675                 bio = sbio->bio;
1676                 if (!bio) {
1677                         bio = btrfs_io_bio_alloc(GFP_KERNEL,
1678                                         wr_ctx->pages_per_wr_bio);
1679                         if (!bio) {
1680                                 mutex_unlock(&wr_ctx->wr_lock);
1681                                 return -ENOMEM;
1682                         }
1683                         sbio->bio = bio;
1684                 }
1685
1686                 bio->bi_private = sbio;
1687                 bio->bi_end_io = scrub_wr_bio_end_io;
1688                 bio->bi_bdev = sbio->dev->bdev;
1689                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1690                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1691                 sbio->err = 0;
1692         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1693                    spage->physical_for_dev_replace ||
1694                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1695                    spage->logical) {
1696                 scrub_wr_submit(sctx);
1697                 goto again;
1698         }
1699
1700         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1701         if (ret != PAGE_SIZE) {
1702                 if (sbio->page_count < 1) {
1703                         bio_put(sbio->bio);
1704                         sbio->bio = NULL;
1705                         mutex_unlock(&wr_ctx->wr_lock);
1706                         return -EIO;
1707                 }
1708                 scrub_wr_submit(sctx);
1709                 goto again;
1710         }
1711
1712         sbio->pagev[sbio->page_count] = spage;
1713         scrub_page_get(spage);
1714         sbio->page_count++;
1715         if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1716                 scrub_wr_submit(sctx);
1717         mutex_unlock(&wr_ctx->wr_lock);
1718
1719         return 0;
1720 }
1721
1722 static void scrub_wr_submit(struct scrub_ctx *sctx)
1723 {
1724         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1725         struct scrub_bio *sbio;
1726
1727         if (!wr_ctx->wr_curr_bio)
1728                 return;
1729
1730         sbio = wr_ctx->wr_curr_bio;
1731         wr_ctx->wr_curr_bio = NULL;
1732         WARN_ON(!sbio->bio->bi_bdev);
1733         scrub_pending_bio_inc(sctx);
1734         /* Process all writes in a single worker thread. The block layer
1735          * then orders the requests before sending them to the driver, which
1736          * doubled the write performance on spinning disks when measured
1737          * with Linux 3.5. */
1738         btrfsic_submit_bio(sbio->bio);
1739 }
1740
1741 static void scrub_wr_bio_end_io(struct bio *bio)
1742 {
1743         struct scrub_bio *sbio = bio->bi_private;
1744         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1745
1746         sbio->err = bio->bi_error;
1747         sbio->bio = bio;
1748
1749         btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1750                          scrub_wr_bio_end_io_worker, NULL, NULL);
1751         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1752 }
1753
1754 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1755 {
1756         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1757         struct scrub_ctx *sctx = sbio->sctx;
1758         int i;
1759
1760         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1761         if (sbio->err) {
1762                 struct btrfs_dev_replace *dev_replace =
1763                         &sbio->sctx->dev_root->fs_info->dev_replace;
1764
1765                 for (i = 0; i < sbio->page_count; i++) {
1766                         struct scrub_page *spage = sbio->pagev[i];
1767
1768                         spage->io_error = 1;
1769                         btrfs_dev_replace_stats_inc(&dev_replace->
1770                                                     num_write_errors);
1771                 }
1772         }
1773
1774         for (i = 0; i < sbio->page_count; i++)
1775                 scrub_page_put(sbio->pagev[i]);
1776
1777         bio_put(sbio->bio);
1778         kfree(sbio);
1779         scrub_pending_bio_dec(sctx);
1780 }
1781
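/*
 * Verify the checksum of a block: dispatch to the data, tree block or
 * super block variant based on the extent flags. A non-zero return means
 * the block is bad and scrub_handle_errored_block() has been called for
 * it; super block errors are accounted inside scrub_checksum_super() and
 * do not trigger the repair path here.
 */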
1782 static int scrub_checksum(struct scrub_block *sblock)
1783 {
1784         u64 flags;
1785         int ret;
1786
1787         /*
1788          * No need to initialize these stats currently,
1789          * because this function only uses the return value
1790          * instead of these stat values.
1791          *
1792          * Todo:
1793          * always use stats
1794          */
1795         sblock->header_error = 0;
1796         sblock->generation_error = 0;
1797         sblock->checksum_error = 0;
1798
1799         WARN_ON(sblock->page_count < 1);
1800         flags = sblock->pagev[0]->flags;
1801         ret = 0;
1802         if (flags & BTRFS_EXTENT_FLAG_DATA)
1803                 ret = scrub_checksum_data(sblock);
1804         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1805                 ret = scrub_checksum_tree_block(sblock);
1806         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1807                 (void)scrub_checksum_super(sblock);
1808         else
1809                 WARN_ON(1);
1810         if (ret)
1811                 scrub_handle_errored_block(sblock);
1812
1813         return ret;
1814 }
1815
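/*
 * Compute the data checksum over all pages of the block and compare it
 * with the on-disk checksum cached in the first page. Returns non-zero
 * and sets sblock->checksum_error on a mismatch; blocks without a cached
 * checksum are accepted as is.
 */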
1816 static int scrub_checksum_data(struct scrub_block *sblock)
1817 {
1818         struct scrub_ctx *sctx = sblock->sctx;
1819         u8 csum[BTRFS_CSUM_SIZE];
1820         u8 *on_disk_csum;
1821         struct page *page;
1822         void *buffer;
1823         u32 crc = ~(u32)0;
1824         u64 len;
1825         int index;
1826
1827         BUG_ON(sblock->page_count < 1);
1828         if (!sblock->pagev[0]->have_csum)
1829                 return 0;
1830
1831         on_disk_csum = sblock->pagev[0]->csum;
1832         page = sblock->pagev[0]->page;
1833         buffer = kmap_atomic(page);
1834
1835         len = sctx->sectorsize;
1836         index = 0;
1837         for (;;) {
1838                 u64 l = min_t(u64, len, PAGE_SIZE);
1839
1840                 crc = btrfs_csum_data(buffer, crc, l);
1841                 kunmap_atomic(buffer);
1842                 len -= l;
1843                 if (len == 0)
1844                         break;
1845                 index++;
1846                 BUG_ON(index >= sblock->page_count);
1847                 BUG_ON(!sblock->pagev[index]->page);
1848                 page = sblock->pagev[index]->page;
1849                 buffer = kmap_atomic(page);
1850         }
1851
1852         btrfs_csum_final(crc, csum);
1853         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1854                 sblock->checksum_error = 1;
1855
1856         return sblock->checksum_error;
1857 }
1858
1859 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1860 {
1861         struct scrub_ctx *sctx = sblock->sctx;
1862         struct btrfs_header *h;
1863         struct btrfs_root *root = sctx->dev_root;
1864         struct btrfs_fs_info *fs_info = root->fs_info;
1865         u8 calculated_csum[BTRFS_CSUM_SIZE];
1866         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1867         struct page *page;
1868         void *mapped_buffer;
1869         u64 mapped_size;
1870         void *p;
1871         u32 crc = ~(u32)0;
1872         u64 len;
1873         int index;
1874
1875         BUG_ON(sblock->page_count < 1);
1876         page = sblock->pagev[0]->page;
1877         mapped_buffer = kmap_atomic(page);
1878         h = (struct btrfs_header *)mapped_buffer;
1879         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1880
1881         /*
1882          * we don't use the getter functions here, as we
1883          * a) don't have an extent buffer and
1884          * b) the page is already kmapped
1885          */
1886         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1887                 sblock->header_error = 1;
1888
1889         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
1890                 sblock->header_error = 1;
1891                 sblock->generation_error = 1;
1892         }
1893
1894         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1895                 sblock->header_error = 1;
1896
1897         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1898                    BTRFS_UUID_SIZE))
1899                 sblock->header_error = 1;
1900
1901         len = sctx->nodesize - BTRFS_CSUM_SIZE;
1902         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1903         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1904         index = 0;
1905         for (;;) {
1906                 u64 l = min_t(u64, len, mapped_size);
1907
1908                 crc = btrfs_csum_data(p, crc, l);
1909                 kunmap_atomic(mapped_buffer);
1910                 len -= l;
1911                 if (len == 0)
1912                         break;
1913                 index++;
1914                 BUG_ON(index >= sblock->page_count);
1915                 BUG_ON(!sblock->pagev[index]->page);
1916                 page = sblock->pagev[index]->page;
1917                 mapped_buffer = kmap_atomic(page);
1918                 mapped_size = PAGE_SIZE;
1919                 p = mapped_buffer;
1920         }
1921
1922         btrfs_csum_final(crc, calculated_csum);
1923         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1924                 sblock->checksum_error = 1;
1925
1926         return sblock->header_error || sblock->checksum_error;
1927 }
1928
1929 static int scrub_checksum_super(struct scrub_block *sblock)
1930 {
1931         struct btrfs_super_block *s;
1932         struct scrub_ctx *sctx = sblock->sctx;
1933         u8 calculated_csum[BTRFS_CSUM_SIZE];
1934         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1935         struct page *page;
1936         void *mapped_buffer;
1937         u64 mapped_size;
1938         void *p;
1939         u32 crc = ~(u32)0;
1940         int fail_gen = 0;
1941         int fail_cor = 0;
1942         u64 len;
1943         int index;
1944
1945         BUG_ON(sblock->page_count < 1);
1946         page = sblock->pagev[0]->page;
1947         mapped_buffer = kmap_atomic(page);
1948         s = (struct btrfs_super_block *)mapped_buffer;
1949         memcpy(on_disk_csum, s->csum, sctx->csum_size);
1950
1951         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
1952                 ++fail_cor;
1953
1954         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
1955                 ++fail_gen;
1956
1957         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
1958                 ++fail_cor;
1959
1960         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1961         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1962         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1963         index = 0;
1964         for (;;) {
1965                 u64 l = min_t(u64, len, mapped_size);
1966
1967                 crc = btrfs_csum_data(p, crc, l);
1968                 kunmap_atomic(mapped_buffer);
1969                 len -= l;
1970                 if (len == 0)
1971                         break;
1972                 index++;
1973                 BUG_ON(index >= sblock->page_count);
1974                 BUG_ON(!sblock->pagev[index]->page);
1975                 page = sblock->pagev[index]->page;
1976                 mapped_buffer = kmap_atomic(page);
1977                 mapped_size = PAGE_SIZE;
1978                 p = mapped_buffer;
1979         }
1980
1981         btrfs_csum_final(crc, calculated_csum);
1982         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1983                 ++fail_cor;
1984
1985         if (fail_cor + fail_gen) {
1986                 /*
1987                  * If we find an error in a super block, we just report it.
1988                  * The super blocks get rewritten with the next transaction
1989                  * commit anyway.
1990                  */
1991                 spin_lock(&sctx->stat_lock);
1992                 ++sctx->stat.super_errors;
1993                 spin_unlock(&sctx->stat_lock);
1994                 if (fail_cor)
1995                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1996                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1997                 else
1998                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1999                                 BTRFS_DEV_STAT_GENERATION_ERRS);
2000         }
2001
2002         return fail_cor + fail_gen;
2003 }
2004
2005 static void scrub_block_get(struct scrub_block *sblock)
2006 {
2007         atomic_inc(&sblock->refs);
2008 }
2009
2010 static void scrub_block_put(struct scrub_block *sblock)
2011 {
2012         if (atomic_dec_and_test(&sblock->refs)) {
2013                 int i;
2014
2015                 if (sblock->sparity)
2016                         scrub_parity_put(sblock->sparity);
2017
2018                 for (i = 0; i < sblock->page_count; i++)
2019                         scrub_page_put(sblock->pagev[i]);
2020                 kfree(sblock);
2021         }
2022 }
2023
2024 static void scrub_page_get(struct scrub_page *spage)
2025 {
2026         atomic_inc(&spage->refs);
2027 }
2028
2029 static void scrub_page_put(struct scrub_page *spage)
2030 {
2031         if (atomic_dec_and_test(&spage->refs)) {
2032                 if (spage->page)
2033                         __free_page(spage->page);
2034                 kfree(spage);
2035         }
2036 }
2037
2038 static void scrub_submit(struct scrub_ctx *sctx)
2039 {
2040         struct scrub_bio *sbio;
2041
2042         if (sctx->curr == -1)
2043                 return;
2044
2045         sbio = sctx->bios[sctx->curr];
2046         sctx->curr = -1;
2047         scrub_pending_bio_inc(sctx);
2048         btrfsic_submit_bio(sbio->bio);
2049 }
2050
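/*
 * Add a page to the current read bio, waiting for a free scrub_bio if none
 * is available. The bio is submitted when the page is not contiguous with
 * it (physically, logically or by device) or when it is full.
 */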
2051 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2052                                     struct scrub_page *spage)
2053 {
2054         struct scrub_block *sblock = spage->sblock;
2055         struct scrub_bio *sbio;
2056         int ret;
2057
2058 again:
2059         /*
2060          * grab a fresh bio or wait for one to become available
2061          */
2062         while (sctx->curr == -1) {
2063                 spin_lock(&sctx->list_lock);
2064                 sctx->curr = sctx->first_free;
2065                 if (sctx->curr != -1) {
2066                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
2067                         sctx->bios[sctx->curr]->next_free = -1;
2068                         sctx->bios[sctx->curr]->page_count = 0;
2069                         spin_unlock(&sctx->list_lock);
2070                 } else {
2071                         spin_unlock(&sctx->list_lock);
2072                         wait_event(sctx->list_wait, sctx->first_free != -1);
2073                 }
2074         }
2075         sbio = sctx->bios[sctx->curr];
2076         if (sbio->page_count == 0) {
2077                 struct bio *bio;
2078
2079                 sbio->physical = spage->physical;
2080                 sbio->logical = spage->logical;
2081                 sbio->dev = spage->dev;
2082                 bio = sbio->bio;
2083                 if (!bio) {
2084                         bio = btrfs_io_bio_alloc(GFP_KERNEL,
2085                                         sctx->pages_per_rd_bio);
2086                         if (!bio)
2087                                 return -ENOMEM;
2088                         sbio->bio = bio;
2089                 }
2090
2091                 bio->bi_private = sbio;
2092                 bio->bi_end_io = scrub_bio_end_io;
2093                 bio->bi_bdev = sbio->dev->bdev;
2094                 bio->bi_iter.bi_sector = sbio->physical >> 9;
2095                 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2096                 sbio->err = 0;
2097         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2098                    spage->physical ||
2099                    sbio->logical + sbio->page_count * PAGE_SIZE !=
2100                    spage->logical ||
2101                    sbio->dev != spage->dev) {
2102                 scrub_submit(sctx);
2103                 goto again;
2104         }
2105
2106         sbio->pagev[sbio->page_count] = spage;
2107         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2108         if (ret != PAGE_SIZE) {
2109                 if (sbio->page_count < 1) {
2110                         bio_put(sbio->bio);
2111                         sbio->bio = NULL;
2112                         return -EIO;
2113                 }
2114                 scrub_submit(sctx);
2115                 goto again;
2116         }
2117
2118         scrub_block_get(sblock); /* one for the page added to the bio */
2119         atomic_inc(&sblock->outstanding_pages);
2120         sbio->page_count++;
2121         if (sbio->page_count == sctx->pages_per_rd_bio)
2122                 scrub_submit(sctx);
2123
2124         return 0;
2125 }
2126
2127 static void scrub_missing_raid56_end_io(struct bio *bio)
2128 {
2129         struct scrub_block *sblock = bio->bi_private;
2130         struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
2131
2132         if (bio->bi_error)
2133                 sblock->no_io_error_seen = 0;
2134
2135         bio_put(bio);
2136
2137         btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2138 }
2139
2140 static void scrub_missing_raid56_worker(struct btrfs_work *work)
2141 {
2142         struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2143         struct scrub_ctx *sctx = sblock->sctx;
2144         u64 logical;
2145         struct btrfs_device *dev;
2146
2147         logical = sblock->pagev[0]->logical;
2148         dev = sblock->pagev[0]->dev;
2149
2150         if (sblock->no_io_error_seen)
2151                 scrub_recheck_block_checksum(sblock);
2152
2153         if (!sblock->no_io_error_seen) {
2154                 spin_lock(&sctx->stat_lock);
2155                 sctx->stat.read_errors++;
2156                 spin_unlock(&sctx->stat_lock);
2157                 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
2158                         "IO error rebuilding logical %llu for dev %s",
2159                         logical, rcu_str_deref(dev->name));
2160         } else if (sblock->header_error || sblock->checksum_error) {
2161                 spin_lock(&sctx->stat_lock);
2162                 sctx->stat.uncorrectable_errors++;
2163                 spin_unlock(&sctx->stat_lock);
2164                 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
2165                         "failed to rebuild valid logical %llu for dev %s",
2166                         logical, rcu_str_deref(dev->name));
2167         } else {
2168                 scrub_write_block_to_dev_replace(sblock);
2169         }
2170
2171         scrub_block_put(sblock);
2172
2173         if (sctx->is_dev_replace &&
2174             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2175                 mutex_lock(&sctx->wr_ctx.wr_lock);
2176                 scrub_wr_submit(sctx);
2177                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2178         }
2179
2180         scrub_pending_bio_dec(sctx);
2181 }
2182
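/*
 * The device backing this block is missing, so it cannot be read directly.
 * For RAID5/6 dev-replace, rebuild the content from the remaining stripes
 * with a "missing" rbio; completion is handled in
 * scrub_missing_raid56_worker().
 */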
2183 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2184 {
2185         struct scrub_ctx *sctx = sblock->sctx;
2186         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2187         u64 length = sblock->page_count * PAGE_SIZE;
2188         u64 logical = sblock->pagev[0]->logical;
2189         struct btrfs_bio *bbio = NULL;
2190         struct bio *bio;
2191         struct btrfs_raid_bio *rbio;
2192         int ret;
2193         int i;
2194
2195         ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
2196                                &bbio, 0, 1);
2197         if (ret || !bbio || !bbio->raid_map)
2198                 goto bbio_out;
2199
2200         if (WARN_ON(!sctx->is_dev_replace ||
2201                     !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2202                 /*
2203                  * We shouldn't be scrubbing a missing device. Even for dev
2204                  * replace, we should only get here for RAID 5/6. We either
2205                  * managed to mount something with no mirrors remaining or
2206                  * there's a bug in scrub_remap_extent()/btrfs_map_block().
2207                  */
2208                 goto bbio_out;
2209         }
2210
2211         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2212         if (!bio)
2213                 goto bbio_out;
2214
2215         bio->bi_iter.bi_sector = logical >> 9;
2216         bio->bi_private = sblock;
2217         bio->bi_end_io = scrub_missing_raid56_end_io;
2218
2219         rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
2220         if (!rbio)
2221                 goto rbio_out;
2222
2223         for (i = 0; i < sblock->page_count; i++) {
2224                 struct scrub_page *spage = sblock->pagev[i];
2225
2226                 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2227         }
2228
2229         btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2230                         scrub_missing_raid56_worker, NULL, NULL);
2231         scrub_block_get(sblock);
2232         scrub_pending_bio_inc(sctx);
2233         raid56_submit_missing_rbio(rbio);
2234         return;
2235
2236 rbio_out:
2237         bio_put(bio);
2238 bbio_out:
2239         btrfs_put_bbio(bbio);
2240         spin_lock(&sctx->stat_lock);
2241         sctx->stat.malloc_errors++;
2242         spin_unlock(&sctx->stat_lock);
2243 }
2244
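/*
 * Split the range [logical, logical + len) into PAGE_SIZE sized scrub
 * pages attached to a new scrub_block and queue them for reading. If the
 * device is missing, the block is handed to scrub_missing_raid56_pages()
 * instead.
 */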
2245 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2246                        u64 physical, struct btrfs_device *dev, u64 flags,
2247                        u64 gen, int mirror_num, u8 *csum, int force,
2248                        u64 physical_for_dev_replace)
2249 {
2250         struct scrub_block *sblock;
2251         int index;
2252
2253         sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2254         if (!sblock) {
2255                 spin_lock(&sctx->stat_lock);
2256                 sctx->stat.malloc_errors++;
2257                 spin_unlock(&sctx->stat_lock);
2258                 return -ENOMEM;
2259         }
2260
2261         /* one ref inside this function, plus one for each page added to
2262          * a bio later on */
2263         atomic_set(&sblock->refs, 1);
2264         sblock->sctx = sctx;
2265         sblock->no_io_error_seen = 1;
2266
2267         for (index = 0; len > 0; index++) {
2268                 struct scrub_page *spage;
2269                 u64 l = min_t(u64, len, PAGE_SIZE);
2270
2271                 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2272                 if (!spage) {
2273 leave_nomem:
2274                         spin_lock(&sctx->stat_lock);
2275                         sctx->stat.malloc_errors++;
2276                         spin_unlock(&sctx->stat_lock);
2277                         scrub_block_put(sblock);
2278                         return -ENOMEM;
2279                 }
2280                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2281                 scrub_page_get(spage);
2282                 sblock->pagev[index] = spage;
2283                 spage->sblock = sblock;
2284                 spage->dev = dev;
2285                 spage->flags = flags;
2286                 spage->generation = gen;
2287                 spage->logical = logical;
2288                 spage->physical = physical;
2289                 spage->physical_for_dev_replace = physical_for_dev_replace;
2290                 spage->mirror_num = mirror_num;
2291                 if (csum) {
2292                         spage->have_csum = 1;
2293                         memcpy(spage->csum, csum, sctx->csum_size);
2294                 } else {
2295                         spage->have_csum = 0;
2296                 }
2297                 sblock->page_count++;
2298                 spage->page = alloc_page(GFP_KERNEL);
2299                 if (!spage->page)
2300                         goto leave_nomem;
2301                 len -= l;
2302                 logical += l;
2303                 physical += l;
2304                 physical_for_dev_replace += l;
2305         }
2306
2307         WARN_ON(sblock->page_count == 0);
2308         if (dev->missing) {
2309                 /*
2310                  * This case should only be hit for RAID 5/6 device replace. See
2311                  * the comment in scrub_missing_raid56_pages() for details.
2312                  */
2313                 scrub_missing_raid56_pages(sblock);
2314         } else {
2315                 for (index = 0; index < sblock->page_count; index++) {
2316                         struct scrub_page *spage = sblock->pagev[index];
2317                         int ret;
2318
2319                         ret = scrub_add_page_to_rd_bio(sctx, spage);
2320                         if (ret) {
2321                                 scrub_block_put(sblock);
2322                                 return ret;
2323                         }
2324                 }
2325
2326                 if (force)
2327                         scrub_submit(sctx);
2328         }
2329
2330         /* last one frees, either here or in bio completion for last page */
2331         scrub_block_put(sblock);
2332         return 0;
2333 }
2334
2335 static void scrub_bio_end_io(struct bio *bio)
2336 {
2337         struct scrub_bio *sbio = bio->bi_private;
2338         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2339
2340         sbio->err = bio->bi_error;
2341         sbio->bio = bio;
2342
2343         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2344 }
2345
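/*
 * Read completion work: mark all pages of a failed bio with an I/O error,
 * complete the scrub_blocks whose last outstanding page was in this bio
 * and put the scrub_bio back on the free list.
 */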
2346 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2347 {
2348         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2349         struct scrub_ctx *sctx = sbio->sctx;
2350         int i;
2351
2352         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2353         if (sbio->err) {
2354                 for (i = 0; i < sbio->page_count; i++) {
2355                         struct scrub_page *spage = sbio->pagev[i];
2356
2357                         spage->io_error = 1;
2358                         spage->sblock->no_io_error_seen = 0;
2359                 }
2360         }
2361
2362         /* now complete the scrub_block items that have all pages completed */
2363         for (i = 0; i < sbio->page_count; i++) {
2364                 struct scrub_page *spage = sbio->pagev[i];
2365                 struct scrub_block *sblock = spage->sblock;
2366
2367                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2368                         scrub_block_complete(sblock);
2369                 scrub_block_put(sblock);
2370         }
2371
2372         bio_put(sbio->bio);
2373         sbio->bio = NULL;
2374         spin_lock(&sctx->list_lock);
2375         sbio->next_free = sctx->first_free;
2376         sctx->first_free = sbio->index;
2377         spin_unlock(&sctx->list_lock);
2378
2379         if (sctx->is_dev_replace &&
2380             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2381                 mutex_lock(&sctx->wr_ctx.wr_lock);
2382                 scrub_wr_submit(sctx);
2383                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2384         }
2385
2386         scrub_pending_bio_dec(sctx);
2387 }
2388
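/*
 * Mark the sectors covered by [start, start + len) in the given per-stripe
 * bitmap. Offsets are taken modulo the stripe length, and a range that
 * runs past the end of the stripe wraps around to the beginning.
 */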
2389 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2390                                        unsigned long *bitmap,
2391                                        u64 start, u64 len)
2392 {
2393         u32 offset;
2394         int nsectors;
2395         int sectorsize = sparity->sctx->dev_root->sectorsize;
2396
2397         if (len >= sparity->stripe_len) {
2398                 bitmap_set(bitmap, 0, sparity->nsectors);
2399                 return;
2400         }
2401
2402         start -= sparity->logic_start;
2403         start = div_u64_rem(start, sparity->stripe_len, &offset);
2404         offset /= sectorsize;
2405         nsectors = (int)len / sectorsize;
2406
2407         if (offset + nsectors <= sparity->nsectors) {
2408                 bitmap_set(bitmap, offset, nsectors);
2409                 return;
2410         }
2411
2412         bitmap_set(bitmap, offset, sparity->nsectors - offset);
2413         bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2414 }
2415
2416 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2417                                                    u64 start, u64 len)
2418 {
2419         __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2420 }
2421
2422 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2423                                                   u64 start, u64 len)
2424 {
2425         __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2426 }
2427
2428 static void scrub_block_complete(struct scrub_block *sblock)
2429 {
2430         int corrupted = 0;
2431
2432         if (!sblock->no_io_error_seen) {
2433                 corrupted = 1;
2434                 scrub_handle_errored_block(sblock);
2435         } else {
2436                 /*
2437                  * In the dev-replace case: if the block has a checksum
2438                  * error it is written via the repair mechanism, otherwise
2439                  * it is written to the target device right here.
2440                  */
2441                 corrupted = scrub_checksum(sblock);
2442                 if (!corrupted && sblock->sctx->is_dev_replace)
2443                         scrub_write_block_to_dev_replace(sblock);
2444         }
2445
2446         if (sblock->sparity && corrupted && !sblock->data_corrected) {
2447                 u64 start = sblock->pagev[0]->logical;
2448                 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2449                           PAGE_SIZE;
2450
2451                 scrub_parity_mark_sectors_error(sblock->sparity,
2452                                                 start, end - start);
2453         }
2454 }
2455
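/*
 * Look up the checksum for @logical in the pre-fetched csum_list. Entries
 * that end before the logical address are dropped along the way. Returns 1
 * and copies the checksum into @csum when one is found, 0 otherwise.
 */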
2456 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2457 {
2458         struct btrfs_ordered_sum *sum = NULL;
2459         unsigned long index;
2460         unsigned long num_sectors;
2461
2462         while (!list_empty(&sctx->csum_list)) {
2463                 sum = list_first_entry(&sctx->csum_list,
2464                                        struct btrfs_ordered_sum, list);
2465                 if (sum->bytenr > logical)
2466                         return 0;
2467                 if (sum->bytenr + sum->len > logical)
2468                         break;
2469
2470                 ++sctx->stat.csum_discards;
2471                 list_del(&sum->list);
2472                 kfree(sum);
2473                 sum = NULL;
2474         }
2475         if (!sum)
2476                 return 0;
2477
2478         index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2479         num_sectors = sum->len / sctx->sectorsize;
2480         memcpy(csum, sum->sums + index, sctx->csum_size);
2481         if (index == num_sectors - 1) {
2482                 list_del(&sum->list);
2483                 kfree(sum);
2484         }
2485         return 1;
2486 }
2487
2488 /* scrub_extent() tries to collect up to 64 kB for each bio */
2489 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2490                         u64 physical, struct btrfs_device *dev, u64 flags,
2491                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2492 {
2493         int ret;
2494         u8 csum[BTRFS_CSUM_SIZE];
2495         u32 blocksize;
2496
2497         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2498                 blocksize = sctx->sectorsize;
2499                 spin_lock(&sctx->stat_lock);
2500                 sctx->stat.data_extents_scrubbed++;
2501                 sctx->stat.data_bytes_scrubbed += len;
2502                 spin_unlock(&sctx->stat_lock);
2503         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2504                 blocksize = sctx->nodesize;
2505                 spin_lock(&sctx->stat_lock);
2506                 sctx->stat.tree_extents_scrubbed++;
2507                 sctx->stat.tree_bytes_scrubbed += len;
2508                 spin_unlock(&sctx->stat_lock);
2509         } else {
2510                 blocksize = sctx->sectorsize;
2511                 WARN_ON(1);
2512         }
2513
2514         while (len) {
2515                 u64 l = min_t(u64, len, blocksize);
2516                 int have_csum = 0;
2517
2518                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2519                         /* push csums to sbio */
2520                         have_csum = scrub_find_csum(sctx, logical, csum);
2521                         if (have_csum == 0)
2522                                 ++sctx->stat.no_csum;
2523                         if (0 && sctx->is_dev_replace && !have_csum) {
2524                                 ret = copy_nocow_pages(sctx, logical, l,
2525                                                        mirror_num,
2526                                                       physical_for_dev_replace);
2527                                 goto behind_scrub_pages;
2528                         }
2529                 }
2530                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2531                                   mirror_num, have_csum ? csum : NULL, 0,
2532                                   physical_for_dev_replace);
2533 behind_scrub_pages:
2534                 if (ret)
2535                         return ret;
2536                 len -= l;
2537                 logical += l;
2538                 physical += l;
2539                 physical_for_dev_replace += l;
2540         }
2541         return 0;
2542 }
2543
2544 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2545                                   u64 logical, u64 len,
2546                                   u64 physical, struct btrfs_device *dev,
2547                                   u64 flags, u64 gen, int mirror_num, u8 *csum)
2548 {
2549         struct scrub_ctx *sctx = sparity->sctx;
2550         struct scrub_block *sblock;
2551         int index;
2552
2553         sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2554         if (!sblock) {
2555                 spin_lock(&sctx->stat_lock);
2556                 sctx->stat.malloc_errors++;
2557                 spin_unlock(&sctx->stat_lock);
2558                 return -ENOMEM;
2559         }
2560
2561         /* one ref inside this function, plus one for each page added to
2562          * a bio later on */
2563         atomic_set(&sblock->refs, 1);
2564         sblock->sctx = sctx;
2565         sblock->no_io_error_seen = 1;
2566         sblock->sparity = sparity;
2567         scrub_parity_get(sparity);
2568
2569         for (index = 0; len > 0; index++) {
2570                 struct scrub_page *spage;
2571                 u64 l = min_t(u64, len, PAGE_SIZE);
2572
2573                 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2574                 if (!spage) {
2575 leave_nomem:
2576                         spin_lock(&sctx->stat_lock);
2577                         sctx->stat.malloc_errors++;
2578                         spin_unlock(&sctx->stat_lock);
2579                         scrub_block_put(sblock);
2580                         return -ENOMEM;
2581                 }
2582                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2583                 /* For scrub block */
2584                 scrub_page_get(spage);
2585                 sblock->pagev[index] = spage;
2586                 /* For scrub parity */
2587                 scrub_page_get(spage);
2588                 list_add_tail(&spage->list, &sparity->spages);
2589                 spage->sblock = sblock;
2590                 spage->dev = dev;
2591                 spage->flags = flags;
2592                 spage->generation = gen;
2593                 spage->logical = logical;
2594                 spage->physical = physical;
2595                 spage->mirror_num = mirror_num;
2596                 if (csum) {
2597                         spage->have_csum = 1;
2598                         memcpy(spage->csum, csum, sctx->csum_size);
2599                 } else {
2600                         spage->have_csum = 0;
2601                 }
2602                 sblock->page_count++;
2603                 spage->page = alloc_page(GFP_KERNEL);
2604                 if (!spage->page)
2605                         goto leave_nomem;
2606                 len -= l;
2607                 logical += l;
2608                 physical += l;
2609         }
2610
2611         WARN_ON(sblock->page_count == 0);
2612         for (index = 0; index < sblock->page_count; index++) {
2613                 struct scrub_page *spage = sblock->pagev[index];
2614                 int ret;
2615
2616                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2617                 if (ret) {
2618                         scrub_block_put(sblock);
2619                         return ret;
2620                 }
2621         }
2622
2623         /* last one frees, either here or in bio completion for last page */
2624         scrub_block_put(sblock);
2625         return 0;
2626 }
2627
2628 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2629                                    u64 logical, u64 len,
2630                                    u64 physical, struct btrfs_device *dev,
2631                                    u64 flags, u64 gen, int mirror_num)
2632 {
2633         struct scrub_ctx *sctx = sparity->sctx;
2634         int ret;
2635         u8 csum[BTRFS_CSUM_SIZE];
2636         u32 blocksize;
2637
2638         if (dev->missing) {
2639                 scrub_parity_mark_sectors_error(sparity, logical, len);
2640                 return 0;
2641         }
2642
2643         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2644                 blocksize = sctx->sectorsize;
2645         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2646                 blocksize = sctx->nodesize;
2647         } else {
2648                 blocksize = sctx->sectorsize;
2649                 WARN_ON(1);
2650         }
2651
2652         while (len) {
2653                 u64 l = min_t(u64, len, blocksize);
2654                 int have_csum = 0;
2655
2656                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2657                         /* push csums to sbio */
2658                         have_csum = scrub_find_csum(sctx, logical, csum);
2659                         if (have_csum == 0)
2660                                 goto skip;
2661                 }
2662                 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2663                                              flags, gen, mirror_num,
2664                                              have_csum ? csum : NULL);
2665                 if (ret)
2666                         return ret;
2667 skip:
2668                 len -= l;
2669                 logical += l;
2670                 physical += l;
2671         }
2672         return 0;
2673 }
2674
2675 /*
2676  * Given a physical address, this calculates its logical offset.
2677  * If this is a parity stripe, it returns the logical offset of the
2678  * leftmost data stripe instead.
2679  *
2680  * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2681  */
2682 static int get_raid56_logic_offset(u64 physical, int num,
2683                                    struct map_lookup *map, u64 *offset,
2684                                    u64 *stripe_start)
2685 {
2686         int i;
2687         int j = 0;
2688         u64 stripe_nr;
2689         u64 last_offset;
2690         u32 stripe_index;
2691         u32 rot;
2692
2693         last_offset = (physical - map->stripes[num].physical) *
2694                       nr_data_stripes(map);
2695         if (stripe_start)
2696                 *stripe_start = last_offset;
2697
2698         *offset = last_offset;
2699         for (i = 0; i < nr_data_stripes(map); i++) {
2700                 *offset = last_offset + i * map->stripe_len;
2701
2702                 stripe_nr = div_u64(*offset, map->stripe_len);
2703                 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2704
2705                 /* Work out the disk rotation on this stripe-set */
2706                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2707                 /* calculate which stripe this data is located on */
2708                 rot += i;
2709                 stripe_index = rot % map->num_stripes;
2710                 if (stripe_index == num)
2711                         return 0;
2712                 if (stripe_index < num)
2713                         j++;
2714         }
2715         *offset = last_offset + j * map->stripe_len;
2716         return 1;
2717 }
2718
2719 static void scrub_free_parity(struct scrub_parity *sparity)
2720 {
2721         struct scrub_ctx *sctx = sparity->sctx;
2722         struct scrub_page *curr, *next;
2723         int nbits;
2724
2725         nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2726         if (nbits) {
2727                 spin_lock(&sctx->stat_lock);
2728                 sctx->stat.read_errors += nbits;
2729                 sctx->stat.uncorrectable_errors += nbits;
2730                 spin_unlock(&sctx->stat_lock);
2731         }
2732
2733         list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2734                 list_del_init(&curr->list);
2735                 scrub_page_put(curr);
2736         }
2737
2738         kfree(sparity);
2739 }
2740
2741 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2742 {
2743         struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2744                                                     work);
2745         struct scrub_ctx *sctx = sparity->sctx;
2746
2747         scrub_free_parity(sparity);
2748         scrub_pending_bio_dec(sctx);
2749 }
2750
2751 static void scrub_parity_bio_endio(struct bio *bio)
2752 {
2753         struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2754
2755         if (bio->bi_error)
2756                 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2757                           sparity->nsectors);
2758
2759         bio_put(bio);
2760
2761         btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2762                         scrub_parity_bio_endio_worker, NULL, NULL);
2763         btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
2764                          &sparity->work);
2765 }
2766
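/*
 * Run once the last reference to the scrub_parity is dropped.  Data sectors
 * that hit read errors (ebitmap) are removed from dbitmap; if nothing
 * readable is left, the context is simply freed.  Otherwise the remaining
 * sectors are handed to the raid56 code, which checks and, if necessary,
 * repairs the parity of this full stripe.
 */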
2767 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2768 {
2769         struct scrub_ctx *sctx = sparity->sctx;
2770         struct bio *bio;
2771         struct btrfs_raid_bio *rbio;
2772         struct scrub_page *spage;
2773         struct btrfs_bio *bbio = NULL;
2774         u64 length;
2775         int ret;
2776
2777         if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2778                            sparity->nsectors))
2779                 goto out;
2780
2781         length = sparity->logic_end - sparity->logic_start;
2782         ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
2783                                sparity->logic_start,
2784                                &length, &bbio, 0, 1);
2785         if (ret || !bbio || !bbio->raid_map)
2786                 goto bbio_out;
2787
2788         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2789         if (!bio)
2790                 goto bbio_out;
2791
2792         bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2793         bio->bi_private = sparity;
2794         bio->bi_end_io = scrub_parity_bio_endio;
2795
2796         rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
2797                                               length, sparity->scrub_dev,
2798                                               sparity->dbitmap,
2799                                               sparity->nsectors);
2800         if (!rbio)
2801                 goto rbio_out;
2802
2803         list_for_each_entry(spage, &sparity->spages, list)
2804                 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2805
2806         scrub_pending_bio_inc(sctx);
2807         raid56_parity_submit_scrub_rbio(rbio);
2808         return;
2809
2810 rbio_out:
2811         bio_put(bio);
2812 bbio_out:
2813         btrfs_put_bbio(bbio);
2814         bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2815                   sparity->nsectors);
2816         spin_lock(&sctx->stat_lock);
2817         sctx->stat.malloc_errors++;
2818         spin_unlock(&sctx->stat_lock);
2819 out:
2820         scrub_free_parity(sparity);
2821 }
2822
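/*
 * Size in bytes of one bitmap covering all sectors of a stripe.  For
 * example, with the usual 64K stripe_len and 4K sectors, nsectors = 16 and
 * on a 64-bit machine (BITS_PER_LONG = 64) this rounds up to a single
 * long, i.e. 8 bytes.
 */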
2823 static inline int scrub_calc_parity_bitmap_len(int nsectors)
2824 {
2825         return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
2826 }
2827
2828 static void scrub_parity_get(struct scrub_parity *sparity)
2829 {
2830         atomic_inc(&sparity->refs);
2831 }
2832
2833 static void scrub_parity_put(struct scrub_parity *sparity)
2834 {
2835         if (!atomic_dec_and_test(&sparity->refs))
2836                 return;
2837
2838         scrub_parity_check_and_repair(sparity);
2839 }
2840
2841 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2842                                                   struct map_lookup *map,
2843                                                   struct btrfs_device *sdev,
2844                                                   struct btrfs_path *path,
2845                                                   u64 logic_start,
2846                                                   u64 logic_end)
2847 {
2848         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2849         struct btrfs_root *root = fs_info->extent_root;
2850         struct btrfs_root *csum_root = fs_info->csum_root;
2851         struct btrfs_extent_item *extent;
2852         struct btrfs_bio *bbio = NULL;
2853         u64 flags;
2854         int ret;
2855         int slot;
2856         struct extent_buffer *l;
2857         struct btrfs_key key;
2858         u64 generation;
2859         u64 extent_logical;
2860         u64 extent_physical;
2861         u64 extent_len;
2862         u64 mapped_length;
2863         struct btrfs_device *extent_dev;
2864         struct scrub_parity *sparity;
2865         int nsectors;
2866         int bitmap_len;
2867         int extent_mirror_num;
2868         int stop_loop = 0;
2869
2870         nsectors = div_u64(map->stripe_len, root->sectorsize);
2871         bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2872         sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2873                           GFP_NOFS);
2874         if (!sparity) {
2875                 spin_lock(&sctx->stat_lock);
2876                 sctx->stat.malloc_errors++;
2877                 spin_unlock(&sctx->stat_lock);
2878                 return -ENOMEM;
2879         }
2880
2881         sparity->stripe_len = map->stripe_len;
2882         sparity->nsectors = nsectors;
2883         sparity->sctx = sctx;
2884         sparity->scrub_dev = sdev;
2885         sparity->logic_start = logic_start;
2886         sparity->logic_end = logic_end;
2887         atomic_set(&sparity->refs, 1);
2888         INIT_LIST_HEAD(&sparity->spages);
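        /*
         * Both bitmaps live in the single allocation made above: the first
         * bitmap_len bytes track the data sectors of the stripe, the second
         * bitmap_len bytes track the sectors that hit errors.
         */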
2889         sparity->dbitmap = sparity->bitmap;
2890         sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2891
2892         ret = 0;
2893         while (logic_start < logic_end) {
2894                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2895                         key.type = BTRFS_METADATA_ITEM_KEY;
2896                 else
2897                         key.type = BTRFS_EXTENT_ITEM_KEY;
2898                 key.objectid = logic_start;
2899                 key.offset = (u64)-1;
2900
2901                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2902                 if (ret < 0)
2903                         goto out;
2904
2905                 if (ret > 0) {
2906                         ret = btrfs_previous_extent_item(root, path, 0);
2907                         if (ret < 0)
2908                                 goto out;
2909                         if (ret > 0) {
2910                                 btrfs_release_path(path);
2911                                 ret = btrfs_search_slot(NULL, root, &key,
2912                                                         path, 0, 0);
2913                                 if (ret < 0)
2914                                         goto out;
2915                         }
2916                 }
2917
2918                 stop_loop = 0;
2919                 while (1) {
2920                         u64 bytes;
2921
2922                         l = path->nodes[0];
2923                         slot = path->slots[0];
2924                         if (slot >= btrfs_header_nritems(l)) {
2925                                 ret = btrfs_next_leaf(root, path);
2926                                 if (ret == 0)
2927                                         continue;
2928                                 if (ret < 0)
2929                                         goto out;
2930
2931                                 stop_loop = 1;
2932                                 break;
2933                         }
2934                         btrfs_item_key_to_cpu(l, &key, slot);
2935
2936                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2937                             key.type != BTRFS_METADATA_ITEM_KEY)
2938                                 goto next;
2939
2940                         if (key.type == BTRFS_METADATA_ITEM_KEY)
2941                                 bytes = root->nodesize;
2942                         else
2943                                 bytes = key.offset;
2944
2945                         if (key.objectid + bytes <= logic_start)
2946                                 goto next;
2947
2948                         if (key.objectid >= logic_end) {
2949                                 stop_loop = 1;
2950                                 break;
2951                         }
2952
2953                         while (key.objectid >= logic_start + map->stripe_len)
2954                                 logic_start += map->stripe_len;
2955
2956                         extent = btrfs_item_ptr(l, slot,
2957                                                 struct btrfs_extent_item);
2958                         flags = btrfs_extent_flags(l, extent);
2959                         generation = btrfs_extent_generation(l, extent);
2960
2961                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2962                             (key.objectid < logic_start ||
2963                              key.objectid + bytes >
2964                              logic_start + map->stripe_len)) {
2965                                 btrfs_err(fs_info,
2966                                           "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2967                                           key.objectid, logic_start);
2968                                 spin_lock(&sctx->stat_lock);
2969                                 sctx->stat.uncorrectable_errors++;
2970                                 spin_unlock(&sctx->stat_lock);
2971                                 goto next;
2972                         }
2973 again:
2974                         extent_logical = key.objectid;
2975                         extent_len = bytes;
2976
2977                         if (extent_logical < logic_start) {
2978                                 extent_len -= logic_start - extent_logical;
2979                                 extent_logical = logic_start;
2980                         }
2981
2982                         if (extent_logical + extent_len >
2983                             logic_start + map->stripe_len)
2984                                 extent_len = logic_start + map->stripe_len -
2985                                              extent_logical;
2986
2987                         scrub_parity_mark_sectors_data(sparity, extent_logical,
2988                                                        extent_len);
2989
2990                         mapped_length = extent_len;
2991                         bbio = NULL;
2992                         ret = btrfs_map_block(fs_info, READ, extent_logical,
2993                                               &mapped_length, &bbio, 0);
2994                         if (!ret) {
2995                                 if (!bbio || mapped_length < extent_len)
2996                                         ret = -EIO;
2997                         }
2998                         if (ret) {
2999                                 btrfs_put_bbio(bbio);
3000                                 goto out;
3001                         }
3002                         extent_physical = bbio->stripes[0].physical;
3003                         extent_mirror_num = bbio->mirror_num;
3004                         extent_dev = bbio->stripes[0].dev;
3005                         btrfs_put_bbio(bbio);
3006
3007                         ret = btrfs_lookup_csums_range(csum_root,
3008                                                 extent_logical,
3009                                                 extent_logical + extent_len - 1,
3010                                                 &sctx->csum_list, 1);
3011                         if (ret)
3012                                 goto out;
3013
3014                         ret = scrub_extent_for_parity(sparity, extent_logical,
3015                                                       extent_len,
3016                                                       extent_physical,
3017                                                       extent_dev, flags,
3018                                                       generation,
3019                                                       extent_mirror_num);
3020
3021                         scrub_free_csums(sctx);
3022
3023                         if (ret)
3024                                 goto out;
3025
3026                         if (extent_logical + extent_len <
3027                             key.objectid + bytes) {
3028                                 logic_start += map->stripe_len;
3029
3030                                 if (logic_start >= logic_end) {
3031                                         stop_loop = 1;
3032                                         break;
3033                                 }
3034
3035                                 if (logic_start < key.objectid + bytes) {
3036                                         cond_resched();
3037                                         goto again;
3038                                 }
3039                         }
3040 next:
3041                         path->slots[0]++;
3042                 }
3043
3044                 btrfs_release_path(path);
3045
3046                 if (stop_loop)
3047                         break;
3048
3049                 logic_start += map->stripe_len;
3050         }
3051 out:
3052         if (ret < 0)
3053                 scrub_parity_mark_sectors_error(sparity, logic_start,
3054                                                 logic_end - logic_start);
3055         scrub_parity_put(sparity);
3056         scrub_submit(sctx);
3057         mutex_lock(&sctx->wr_ctx.wr_lock);
3058         scrub_wr_submit(sctx);
3059         mutex_unlock(&sctx->wr_ctx.wr_lock);
3060
3061         btrfs_release_path(path);
3062         return ret < 0 ? ret : 0;
3063 }
3064
3065 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3066                                            struct map_lookup *map,
3067                                            struct btrfs_device *scrub_dev,
3068                                            int num, u64 base, u64 length,
3069                                            int is_dev_replace)
3070 {
3071         struct btrfs_path *path, *ppath;
3072         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3073         struct btrfs_root *root = fs_info->extent_root;
3074         struct btrfs_root *csum_root = fs_info->csum_root;
3075         struct btrfs_extent_item *extent;
3076         struct blk_plug plug;
3077         u64 flags;
3078         int ret;
3079         int slot;
3080         u64 nstripes;
3081         struct extent_buffer *l;
3082         u64 physical;
3083         u64 logical;
3084         u64 logic_end;
3085         u64 physical_end;
3086         u64 generation;
3087         int mirror_num;
3088         struct reada_control *reada1;
3089         struct reada_control *reada2;
3090         struct btrfs_key key;
3091         struct btrfs_key key_end;
3092         u64 increment = map->stripe_len;
3093         u64 offset;
3094         u64 extent_logical;
3095         u64 extent_physical;
3096         u64 extent_len;
3097         u64 stripe_logical;
3098         u64 stripe_end;
3099         struct btrfs_device *extent_dev;
3100         int extent_mirror_num;
3101         int stop_loop = 0;
3102
3103         physical = map->stripes[num].physical;
3104         offset = 0;
3105         nstripes = div_u64(length, map->stripe_len);
3106         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3107                 offset = map->stripe_len * num;
3108                 increment = map->stripe_len * map->num_stripes;
3109                 mirror_num = 1;
3110         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3111                 int factor = map->num_stripes / map->sub_stripes;
3112                 offset = map->stripe_len * (num / map->sub_stripes);
3113                 increment = map->stripe_len * factor;
3114                 mirror_num = num % map->sub_stripes + 1;
3115         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3116                 increment = map->stripe_len;
3117                 mirror_num = num % map->num_stripes + 1;
3118         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3119                 increment = map->stripe_len;
3120                 mirror_num = num % map->num_stripes + 1;
3121         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3122                 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3123                 increment = map->stripe_len * nr_data_stripes(map);
3124                 mirror_num = 1;
3125         } else {
3126                 increment = map->stripe_len;
3127                 mirror_num = 1;
3128         }
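        /*
         * Example (illustrative values): a RAID10 chunk with num_stripes = 4
         * and sub_stripes = 2 gives factor = 2, so for num = 3 this yields
         * offset = stripe_len, increment = 2 * stripe_len and mirror_num = 2:
         * device 3 holds the second copy of the second sub-stripe and sees
         * only every other stripe_len of the logical address space.
         */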
3129
3130         path = btrfs_alloc_path();
3131         if (!path)
3132                 return -ENOMEM;
3133
3134         ppath = btrfs_alloc_path();
3135         if (!ppath) {
3136                 btrfs_free_path(path);
3137                 return -ENOMEM;
3138         }
3139
3140         /*
3141          * Work on the commit root. The related disk blocks are static as
3142          * long as COW is applied. This means it is safe to rewrite
3143          * them to repair disk errors without any race conditions.
3144          */
3145         path->search_commit_root = 1;
3146         path->skip_locking = 1;
3147
3148         ppath->search_commit_root = 1;
3149         ppath->skip_locking = 1;
3150         /*
3151          * Trigger the readahead for the extent tree and the csum tree and
3152          * wait for completion. During readahead, the scrub is officially
3153          * paused so that it does not hold off transaction commits.
3154          */
3155         logical = base + offset;
3156         physical_end = physical + nstripes * map->stripe_len;
3157         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3158                 get_raid56_logic_offset(physical_end, num,
3159                                         map, &logic_end, NULL);
3160                 logic_end += base;
3161         } else {
3162                 logic_end = logical + increment * nstripes;
3163         }
3164         wait_event(sctx->list_wait,
3165                    atomic_read(&sctx->bios_in_flight) == 0);
3166         scrub_blocked_if_needed(fs_info);
3167
3168         /* FIXME it might be better to start readahead at commit root */
3169         key.objectid = logical;
3170         key.type = BTRFS_EXTENT_ITEM_KEY;
3171         key.offset = (u64)0;
3172         key_end.objectid = logic_end;
3173         key_end.type = BTRFS_METADATA_ITEM_KEY;
3174         key_end.offset = (u64)-1;
3175         reada1 = btrfs_reada_add(root, &key, &key_end);
3176
3177         key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3178         key.type = BTRFS_EXTENT_CSUM_KEY;
3179         key.offset = logical;
3180         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3181         key_end.type = BTRFS_EXTENT_CSUM_KEY;
3182         key_end.offset = logic_end;
3183         reada2 = btrfs_reada_add(csum_root, &key, &key_end);
3184
3185         if (!IS_ERR(reada1))
3186                 btrfs_reada_wait(reada1);
3187         if (!IS_ERR(reada2))
3188                 btrfs_reada_wait(reada2);
3189
3190
3191         /*
3192          * Collect all data csums for the stripe to avoid seeking during
3193          * the scrub. This might currently (crc32) end up being about 1MB.
3194          */
3195         blk_start_plug(&plug);
3196
3197         /*
3198          * now find all extents for each stripe and scrub them
3199          */
3200         ret = 0;
3201         while (physical < physical_end) {
3202                 /*
3203                  * canceled?
3204                  */
3205                 if (atomic_read(&fs_info->scrub_cancel_req) ||
3206                     atomic_read(&sctx->cancel_req)) {
3207                         ret = -ECANCELED;
3208                         goto out;
3209                 }
3210                 /*
3211                  * check to see if we have to pause
3212                  */
3213                 if (atomic_read(&fs_info->scrub_pause_req)) {
3214                         /* push queued extents */
3215                         atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3216                         scrub_submit(sctx);
3217                         mutex_lock(&sctx->wr_ctx.wr_lock);
3218                         scrub_wr_submit(sctx);
3219                         mutex_unlock(&sctx->wr_ctx.wr_lock);
3220                         wait_event(sctx->list_wait,
3221                                    atomic_read(&sctx->bios_in_flight) == 0);
3222                         atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3223                         scrub_blocked_if_needed(fs_info);
3224                 }
3225
3226                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3227                         ret = get_raid56_logic_offset(physical, num, map,
3228                                                       &logical,
3229                                                       &stripe_logical);
3230                         logical += base;
3231                         if (ret) {
3232                                 /* it is a parity stripe */
3233                                 stripe_logical += base;
3234                                 stripe_end = stripe_logical + increment;
3235                                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3236                                                           ppath, stripe_logical,
3237                                                           stripe_end);
3238                                 if (ret)
3239                                         goto out;
3240                                 goto skip;
3241                         }
3242                 }
3243
3244                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3245                         key.type = BTRFS_METADATA_ITEM_KEY;
3246                 else
3247                         key.type = BTRFS_EXTENT_ITEM_KEY;
3248                 key.objectid = logical;
3249                 key.offset = (u64)-1;
3250
3251                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3252                 if (ret < 0)
3253                         goto out;
3254
3255                 if (ret > 0) {
3256                         ret = btrfs_previous_extent_item(root, path, 0);
3257                         if (ret < 0)
3258                                 goto out;
3259                         if (ret > 0) {
3260                                 /* there's no smaller item, so stick with the
3261                                  * larger one */
3262                                 btrfs_release_path(path);
3263                                 ret = btrfs_search_slot(NULL, root, &key,
3264                                                         path, 0, 0);
3265                                 if (ret < 0)
3266                                         goto out;
3267                         }
3268                 }
3269
3270                 stop_loop = 0;
3271                 while (1) {
3272                         u64 bytes;
3273
3274                         l = path->nodes[0];
3275                         slot = path->slots[0];
3276                         if (slot >= btrfs_header_nritems(l)) {
3277                                 ret = btrfs_next_leaf(root, path);
3278                                 if (ret == 0)
3279                                         continue;
3280                                 if (ret < 0)
3281                                         goto out;
3282
3283                                 stop_loop = 1;
3284                                 break;
3285                         }
3286                         btrfs_item_key_to_cpu(l, &key, slot);
3287
3288                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3289                             key.type != BTRFS_METADATA_ITEM_KEY)
3290                                 goto next;
3291
3292                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3293                                 bytes = root->nodesize;
3294                         else
3295                                 bytes = key.offset;
3296
3297                         if (key.objectid + bytes <= logical)
3298                                 goto next;
3299
3300                         if (key.objectid >= logical + map->stripe_len) {
3301                                 /* out of this device extent */
3302                                 if (key.objectid >= logic_end)
3303                                         stop_loop = 1;
3304                                 break;
3305                         }
3306
3307                         extent = btrfs_item_ptr(l, slot,
3308                                                 struct btrfs_extent_item);
3309                         flags = btrfs_extent_flags(l, extent);
3310                         generation = btrfs_extent_generation(l, extent);
3311
3312                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3313                             (key.objectid < logical ||
3314                              key.objectid + bytes >
3315                              logical + map->stripe_len)) {
3316                                 btrfs_err(fs_info,
3317                                            "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3318                                        key.objectid, logical);
3319                                 spin_lock(&sctx->stat_lock);
3320                                 sctx->stat.uncorrectable_errors++;
3321                                 spin_unlock(&sctx->stat_lock);
3322                                 goto next;
3323                         }
3324
3325 again:
3326                         extent_logical = key.objectid;
3327                         extent_len = bytes;
3328
3329                         /*
3330                          * trim extent to this stripe
3331                          */
3332                         if (extent_logical < logical) {
3333                                 extent_len -= logical - extent_logical;
3334                                 extent_logical = logical;
3335                         }
3336                         if (extent_logical + extent_len >
3337                             logical + map->stripe_len) {
3338                                 extent_len = logical + map->stripe_len -
3339                                              extent_logical;
3340                         }
3341
3342                         extent_physical = extent_logical - logical + physical;
3343                         extent_dev = scrub_dev;
3344                         extent_mirror_num = mirror_num;
3345                         if (is_dev_replace)
3346                                 scrub_remap_extent(fs_info, extent_logical,
3347                                                    extent_len, &extent_physical,
3348                                                    &extent_dev,
3349                                                    &extent_mirror_num);
3350
3351                         ret = btrfs_lookup_csums_range(csum_root,
3352                                                        extent_logical,
3353                                                        extent_logical +
3354                                                        extent_len - 1,
3355                                                        &sctx->csum_list, 1);
3356                         if (ret)
3357                                 goto out;
3358
3359                         ret = scrub_extent(sctx, extent_logical, extent_len,
3360                                            extent_physical, extent_dev, flags,
3361                                            generation, extent_mirror_num,
3362                                            extent_logical - logical + physical);
3363
3364                         scrub_free_csums(sctx);
3365
3366                         if (ret)
3367                                 goto out;
3368
3369                         if (extent_logical + extent_len <
3370                             key.objectid + bytes) {
3371                                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3372                                         /*
3373                                          * loop until we find next data stripe
3374                                          * or we have finished all stripes.
3375                                          */
3376 loop:
3377                                         physical += map->stripe_len;
3378                                         ret = get_raid56_logic_offset(physical,
3379                                                         num, map, &logical,
3380                                                         &stripe_logical);
3381                                         logical += base;
3382
3383                                         if (ret && physical < physical_end) {
3384                                                 stripe_logical += base;
3385                                                 stripe_end = stripe_logical +
3386                                                                 increment;
3387                                                 ret = scrub_raid56_parity(sctx,
3388                                                         map, scrub_dev, ppath,
3389                                                         stripe_logical,
3390                                                         stripe_end);
3391                                                 if (ret)
3392                                                         goto out;
3393                                                 goto loop;
3394                                         }
3395                                 } else {
3396                                         physical += map->stripe_len;
3397                                         logical += increment;
3398                                 }
3399                                 if (logical < key.objectid + bytes) {
3400                                         cond_resched();
3401                                         goto again;
3402                                 }
3403
3404                                 if (physical >= physical_end) {
3405                                         stop_loop = 1;
3406                                         break;
3407                                 }
3408                         }
3409 next:
3410                         path->slots[0]++;
3411                 }
3412                 btrfs_release_path(path);
3413 skip:
3414                 logical += increment;
3415                 physical += map->stripe_len;
3416                 spin_lock(&sctx->stat_lock);
3417                 if (stop_loop)
3418                         sctx->stat.last_physical = map->stripes[num].physical +
3419                                                    length;
3420                 else
3421                         sctx->stat.last_physical = physical;
3422                 spin_unlock(&sctx->stat_lock);
3423                 if (stop_loop)
3424                         break;
3425         }
3426 out:
3427         /* push queued extents */
3428         scrub_submit(sctx);
3429         mutex_lock(&sctx->wr_ctx.wr_lock);
3430         scrub_wr_submit(sctx);
3431         mutex_unlock(&sctx->wr_ctx.wr_lock);
3432
3433         blk_finish_plug(&plug);
3434         btrfs_free_path(path);
3435         btrfs_free_path(ppath);
3436         return ret < 0 ? ret : 0;
3437 }
3438
3439 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3440                                           struct btrfs_device *scrub_dev,
3441                                           u64 chunk_offset, u64 length,
3442                                           u64 dev_offset,
3443                                           struct btrfs_block_group_cache *cache,
3444                                           int is_dev_replace)
3445 {
3446         struct btrfs_mapping_tree *map_tree =
3447                 &sctx->dev_root->fs_info->mapping_tree;
3448         struct map_lookup *map;
3449         struct extent_map *em;
3450         int i;
3451         int ret = 0;
3452
3453         read_lock(&map_tree->map_tree.lock);
3454         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3455         read_unlock(&map_tree->map_tree.lock);
3456
3457         if (!em) {
3458                 /*
3459                  * Might have been an unused block group deleted by the cleaner
3460                  * kthread or relocation.
3461                  */
3462                 spin_lock(&cache->lock);
3463                 if (!cache->removed)
3464                         ret = -EINVAL;
3465                 spin_unlock(&cache->lock);
3466
3467                 return ret;
3468         }
3469
3470         map = em->map_lookup;
3471         if (em->start != chunk_offset)
3472                 goto out;
3473
3474         if (em->len < length)
3475                 goto out;
3476
3477         for (i = 0; i < map->num_stripes; ++i) {
3478                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3479                     map->stripes[i].physical == dev_offset) {
3480                         ret = scrub_stripe(sctx, map, scrub_dev, i,
3481                                            chunk_offset, length,
3482                                            is_dev_replace);
3483                         if (ret)
3484                                 goto out;
3485                 }
3486         }
3487 out:
3488         free_extent_map(em);
3489
3490         return ret;
3491 }
3492
3493 static noinline_for_stack
3494 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3495                            struct btrfs_device *scrub_dev, u64 start, u64 end,
3496                            int is_dev_replace)
3497 {
3498         struct btrfs_dev_extent *dev_extent = NULL;
3499         struct btrfs_path *path;
3500         struct btrfs_root *root = sctx->dev_root;
3501         struct btrfs_fs_info *fs_info = root->fs_info;
3502         u64 length;
3503         u64 chunk_offset;
3504         int ret = 0;
3505         int ro_set;
3506         int slot;
3507         struct extent_buffer *l;
3508         struct btrfs_key key;
3509         struct btrfs_key found_key;
3510         struct btrfs_block_group_cache *cache;
3511         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3512
3513         path = btrfs_alloc_path();
3514         if (!path)
3515                 return -ENOMEM;
3516
3517         path->reada = READA_FORWARD;
3518         path->search_commit_root = 1;
3519         path->skip_locking = 1;
3520
3521         key.objectid = scrub_dev->devid;
3522         key.offset = 0ull;
3523         key.type = BTRFS_DEV_EXTENT_KEY;
3524
3525         while (1) {
3526                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3527                 if (ret < 0)
3528                         break;
3529                 if (ret > 0) {
3530                         if (path->slots[0] >=
3531                             btrfs_header_nritems(path->nodes[0])) {
3532                                 ret = btrfs_next_leaf(root, path);
3533                                 if (ret < 0)
3534                                         break;
3535                                 if (ret > 0) {
3536                                         ret = 0;
3537                                         break;
3538                                 }
3539                         } else {
3540                                 ret = 0;
3541                         }
3542                 }
3543
3544                 l = path->nodes[0];
3545                 slot = path->slots[0];
3546
3547                 btrfs_item_key_to_cpu(l, &found_key, slot);
3548
3549                 if (found_key.objectid != scrub_dev->devid)
3550                         break;
3551
3552                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3553                         break;
3554
3555                 if (found_key.offset >= end)
3556                         break;
3557
3558                 if (found_key.offset < key.offset)
3559                         break;
3560
3561                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3562                 length = btrfs_dev_extent_length(l, dev_extent);
3563
3564                 if (found_key.offset + length <= start)
3565                         goto skip;
3566
3567                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3568
3569                 /*
3570                  * get a reference on the corresponding block group to prevent
3571                  * the chunk from going away while we scrub it
3572                  */
3573                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3574
3575                 /* some chunks are removed but not committed to disk yet,
3576                  * continue scrubbing */
3577                 if (!cache)
3578                         goto skip;
3579
3580                 /*
3581                  * we need call btrfs_inc_block_group_ro() with scrubs_paused,
3582                  * to avoid deadlock caused by:
3583                  * btrfs_inc_block_group_ro()
3584                  * -> btrfs_wait_for_commit()
3585                  * -> btrfs_commit_transaction()
3586                  * -> btrfs_scrub_pause()
3587                  */
3588                 scrub_pause_on(fs_info);
3589                 ret = btrfs_inc_block_group_ro(root, cache);
3590                 if (!ret && is_dev_replace) {
3591                         /*
3592                          * If we are doing a device replace wait for any tasks
3593                          * that started delalloc right before we set the block
3594                          * group to RO mode, as they might have just allocated
3595                          * an extent from it or decided they could do a nocow
3596                          * write. And if any such tasks did that, wait for their
3597                          * ordered extents to complete and then commit the
3598                          * current transaction, so that we can later see the new
3599                          * extent items in the extent tree - the ordered extents
3600                          * create delayed data references (for cow writes) when
3601                          * they complete, which will be run and insert the
3602                          * corresponding extent items into the extent tree when
3603                          * we commit the transaction they used when running
3604                          * inode.c:btrfs_finish_ordered_io(). We later use
3605                          * the commit root of the extent tree to find extents
3606                          * to copy from the srcdev into the tgtdev, and we don't
3607                          * want to miss any new extents.
3608                          */
3609                         btrfs_wait_block_group_reservations(cache);
3610                         btrfs_wait_nocow_writers(cache);
3611                         ret = btrfs_wait_ordered_roots(fs_info, -1,
3612                                                        cache->key.objectid,
3613                                                        cache->key.offset);
3614                         if (ret > 0) {
3615                                 struct btrfs_trans_handle *trans;
3616
3617                                 trans = btrfs_join_transaction(root);
3618                                 if (IS_ERR(trans))
3619                                         ret = PTR_ERR(trans);
3620                                 else
3621                                         ret = btrfs_commit_transaction(trans,
3622                                                                        root);
3623                                 if (ret) {
3624                                         scrub_pause_off(fs_info);
3625                                         btrfs_put_block_group(cache);
3626                                         break;
3627                                 }
3628                         }
3629                 }
3630                 scrub_pause_off(fs_info);
3631
3632                 if (ret == 0) {
3633                         ro_set = 1;
3634                 } else if (ret == -ENOSPC) {
3635                         /*
3636                          * btrfs_inc_block_group_ro returns -ENOSPC when it
3637                          * fails to create a new chunk for metadata.
3638                          * This is not a problem for scrub/replace, because
3639                          * metadata is always COWed, and our scrub pauses
3640                          * transaction commits.
3641                          */
3642                         ro_set = 0;
3643                 } else {
3644                         btrfs_warn(fs_info,
3645                                    "failed setting block group ro, ret=%d",
3646                                    ret);
3647                         btrfs_put_block_group(cache);
3648                         break;
3649                 }
3650
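                /*
                 * Record the device extent we are about to scrub in the
                 * dev-replace state: the cursors bracket the range that is
                 * currently being processed and will be written back with
                 * the replace item.
                 */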
3651                 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3652                 dev_replace->cursor_right = found_key.offset + length;
3653                 dev_replace->cursor_left = found_key.offset;
3654                 dev_replace->item_needs_writeback = 1;
3655                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3656                 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3657                                   found_key.offset, cache, is_dev_replace);
3658
3659                 /*
3660                  * flush, submit all pending read and write bios, afterwards
3661                  * wait for them.
3662                  * Note that in the dev replace case, a read request causes
3663                  * write requests that are submitted in the read completion
3664                  * worker. Therefore in the current situation, it is required
3665                  * that all write requests are flushed, so that all read and
3666                  * write requests are really completed when bios_in_flight
3667                  * changes to 0.
3668                  */
3669                 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3670                 scrub_submit(sctx);
3671                 mutex_lock(&sctx->wr_ctx.wr_lock);
3672                 scrub_wr_submit(sctx);
3673                 mutex_unlock(&sctx->wr_ctx.wr_lock);
3674
3675                 wait_event(sctx->list_wait,
3676                            atomic_read(&sctx->bios_in_flight) == 0);
3677
3678                 scrub_pause_on(fs_info);
3679
3680                 /*
3681                  * This must be done before we decrease @scrub_paused.
3682                  * It makes sure we don't block transaction commit while
3683                  * we are waiting for pending workers to finish.
3684                  */
3685                 wait_event(sctx->list_wait,
3686                            atomic_read(&sctx->workers_pending) == 0);
3687                 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3688
3689                 scrub_pause_off(fs_info);
3690
3691                 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3692                 dev_replace->cursor_left = dev_replace->cursor_right;
3693                 dev_replace->item_needs_writeback = 1;
3694                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3695
3696                 if (ro_set)
3697                         btrfs_dec_block_group_ro(root, cache);
3698
3699                 /*
3700                  * We might have prevented the cleaner kthread from deleting
3701                  * this block group if it was already unused because we raced
3702                  * and set it to RO mode first. So add it back to the unused
3703                  * list, otherwise it might not ever be deleted unless a manual
3704                  * balance is triggered or it becomes used and unused again.
3705                  */
3706                 spin_lock(&cache->lock);
3707                 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3708                     btrfs_block_group_used(&cache->item) == 0) {
3709                         spin_unlock(&cache->lock);
3710                         spin_lock(&fs_info->unused_bgs_lock);
3711                         if (list_empty(&cache->bg_list)) {
3712                                 btrfs_get_block_group(cache);
3713                                 list_add_tail(&cache->bg_list,
3714                                               &fs_info->unused_bgs);
3715                         }
3716                         spin_unlock(&fs_info->unused_bgs_lock);
3717                 } else {
3718                         spin_unlock(&cache->lock);
3719                 }
3720
3721                 btrfs_put_block_group(cache);
3722                 if (ret)
3723                         break;
3724                 if (is_dev_replace &&
3725                     atomic64_read(&dev_replace->num_write_errors) > 0) {
3726                         ret = -EIO;
3727                         break;
3728                 }
3729                 if (sctx->stat.malloc_errors > 0) {
3730                         ret = -ENOMEM;
3731                         break;
3732                 }
3733 skip:
3734                 key.offset = found_key.offset + length;
3735                 btrfs_release_path(path);
3736         }
3737
3738         btrfs_free_path(path);
3739
3740         return ret;
3741 }
3742
3743 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3744                                            struct btrfs_device *scrub_dev)
3745 {
3746         int     i;
3747         u64     bytenr;
3748         u64     gen;
3749         int     ret;
3750         struct btrfs_root *root = sctx->dev_root;
3751
3752         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
3753                 return -EIO;
3754
3755         /* Seed devices of a new filesystem have their own generation. */
3756         if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3757                 gen = scrub_dev->generation;
3758         else
3759                 gen = root->fs_info->last_trans_committed;
3760
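        /*
         * Scrub every superblock mirror that fits on the device.  The copies
         * sit at fixed offsets (64KiB, 64MiB and 256GiB), so smaller devices
         * simply have fewer mirrors to check.
         */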
3761         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3762                 bytenr = btrfs_sb_offset(i);
3763                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3764                     scrub_dev->commit_total_bytes)
3765                         break;
3766
3767                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3768                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3769                                   NULL, 1, bytenr);
3770                 if (ret)
3771                         return ret;
3772         }
3773         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3774
3775         return 0;
3776 }
3777
3778 /*
3779  * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
3780  */
3781 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3782                                                 int is_dev_replace)
3783 {
3784         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3785         int max_active = fs_info->thread_pool_size;
3786
3787         if (fs_info->scrub_workers_refcnt == 0) {
3788                 if (is_dev_replace)
3789                         fs_info->scrub_workers =
3790                                 btrfs_alloc_workqueue(fs_info, "scrub", flags,
3791                                                       1, 4);
3792                 else
3793                         fs_info->scrub_workers =
3794                                 btrfs_alloc_workqueue(fs_info, "scrub", flags,
3795                                                       max_active, 4);
3796                 if (!fs_info->scrub_workers)
3797                         goto fail_scrub_workers;
3798
3799                 fs_info->scrub_wr_completion_workers =
3800                         btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
3801                                               max_active, 2);
3802                 if (!fs_info->scrub_wr_completion_workers)
3803                         goto fail_scrub_wr_completion_workers;
3804
3805                 fs_info->scrub_nocow_workers =
3806                         btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
3807                 if (!fs_info->scrub_nocow_workers)
3808                         goto fail_scrub_nocow_workers;
3809                 fs_info->scrub_parity_workers =
3810                         btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
3811                                               max_active, 2);
3812                 if (!fs_info->scrub_parity_workers)
3813                         goto fail_scrub_parity_workers;
3814         }
3815         ++fs_info->scrub_workers_refcnt;
3816         return 0;
3817
3818 fail_scrub_parity_workers:
3819         btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3820 fail_scrub_nocow_workers:
3821         btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3822 fail_scrub_wr_completion_workers:
3823         btrfs_destroy_workqueue(fs_info->scrub_workers);
3824 fail_scrub_workers:
3825         return -ENOMEM;
3826 }
3827
3828 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
3829 {
3830         if (--fs_info->scrub_workers_refcnt == 0) {
3831                 btrfs_destroy_workqueue(fs_info->scrub_workers);
3832                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3833                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3834                 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
3835         }
3836         WARN_ON(fs_info->scrub_workers_refcnt < 0);
3837 }
3838
3839 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3840                     u64 end, struct btrfs_scrub_progress *progress,
3841                     int readonly, int is_dev_replace)
3842 {
3843         struct scrub_ctx *sctx;
3844         int ret;
3845         struct btrfs_device *dev;
3846         struct rcu_string *name;
3847
3848         if (btrfs_fs_closing(fs_info))
3849                 return -EINVAL;
3850
3851         if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
3852                 /*
3853                  * The way scrub is implemented, it is unable to calculate
3854                  * the checksum in this case. Do not handle this situation
3855                  * at all because it won't ever happen.
3856                  */
3857                 btrfs_err(fs_info,
3858                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3859                        fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
3860                 return -EINVAL;
3861         }
3862
3863         if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
3864                 /* not supported for data w/o checksums */
3865                 btrfs_err_rl(fs_info,
3866                            "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
3867                        fs_info->chunk_root->sectorsize, PAGE_SIZE);
3868                 return -EINVAL;
3869         }
3870
3871         if (fs_info->chunk_root->nodesize >
3872             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3873             fs_info->chunk_root->sectorsize >
3874             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3875                 /*
3876                  * This would exhaust the array bounds of the pagev member
3877                  * in struct scrub_block.
3878                  */
3879                 btrfs_err(fs_info,
3880                           "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3881                        fs_info->chunk_root->nodesize,
3882                        SCRUB_MAX_PAGES_PER_BLOCK,
3883                        fs_info->chunk_root->sectorsize,
3884                        SCRUB_MAX_PAGES_PER_BLOCK);
3885                 return -EINVAL;
3886         }
3887
3888
3889         mutex_lock(&fs_info->fs_devices->device_list_mutex);
3890         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3891         if (!dev || (dev->missing && !is_dev_replace)) {
3892                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3893                 return -ENODEV;
3894         }
3895
3896         if (!is_dev_replace && !readonly && !dev->writeable) {
3897                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3898                 rcu_read_lock();
3899                 name = rcu_dereference(dev->name);
3900                 btrfs_err(fs_info, "scrub: device %s is not writable",
3901                           name->str);
3902                 rcu_read_unlock();
3903                 return -EROFS;
3904         }
3905
3906         mutex_lock(&fs_info->scrub_lock);
3907         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
3908                 mutex_unlock(&fs_info->scrub_lock);
3909                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3910                 return -EIO;
3911         }
3912
3913         btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
3914         if (dev->scrub_device ||
3915             (!is_dev_replace &&
3916              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3917                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3918                 mutex_unlock(&fs_info->scrub_lock);
3919                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3920                 return -EINPROGRESS;
3921         }
3922         btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3923
3924         ret = scrub_workers_get(fs_info, is_dev_replace);
3925         if (ret) {
3926                 mutex_unlock(&fs_info->scrub_lock);
3927                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3928                 return ret;
3929         }
3930
3931         sctx = scrub_setup_ctx(dev, is_dev_replace);
3932         if (IS_ERR(sctx)) {
3933                 mutex_unlock(&fs_info->scrub_lock);
3934                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3935                 scrub_workers_put(fs_info);
3936                 return PTR_ERR(sctx);
3937         }
3938         sctx->readonly = readonly;
3939         dev->scrub_device = sctx;
3940         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3941
3942         /*
3943          * By checking @scrub_pause_req here, we can avoid a
3944          * race between transaction commit and scrubbing.
3945          */
3946         __scrub_blocked_if_needed(fs_info);
3947         atomic_inc(&fs_info->scrubs_running);
3948         mutex_unlock(&fs_info->scrub_lock);
3949
3950         if (!is_dev_replace) {
3951                 /*
3952                  * by holding device list mutex, we can
3953                  * kick off writing super in log tree sync.
3954                  */
3955                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3956                 ret = scrub_supers(sctx, dev);
3957                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3958         }
3959
3960         if (!ret)
3961                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3962                                              is_dev_replace);
3963
3964         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3965         atomic_dec(&fs_info->scrubs_running);
3966         wake_up(&fs_info->scrub_pause_wait);
3967
3968         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3969
3970         if (progress)
3971                 memcpy(progress, &sctx->stat, sizeof(*progress));
3972
3973         mutex_lock(&fs_info->scrub_lock);
3974         dev->scrub_device = NULL;
3975         scrub_workers_put(fs_info);
3976         mutex_unlock(&fs_info->scrub_lock);
3977
3978         scrub_put_ctx(sctx);
3979
3980         return ret;
3981 }
3982
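/*
 * Ask every running scrub to pause and wait until all of them have
 * actually reached the paused state.
 */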
3983 void btrfs_scrub_pause(struct btrfs_root *root)
3984 {
3985         struct btrfs_fs_info *fs_info = root->fs_info;
3986
3987         mutex_lock(&fs_info->scrub_lock);
3988         atomic_inc(&fs_info->scrub_pause_req);
3989         while (atomic_read(&fs_info->scrubs_paused) !=
3990                atomic_read(&fs_info->scrubs_running)) {
3991                 mutex_unlock(&fs_info->scrub_lock);
3992                 wait_event(fs_info->scrub_pause_wait,
3993                            atomic_read(&fs_info->scrubs_paused) ==
3994                            atomic_read(&fs_info->scrubs_running));
3995                 mutex_lock(&fs_info->scrub_lock);
3996         }
3997         mutex_unlock(&fs_info->scrub_lock);
3998 }
3999
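/*
 * Drop the pause request taken in btrfs_scrub_pause() and wake up the
 * paused scrubs so they can resume.
 */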
4000 void btrfs_scrub_continue(struct btrfs_root *root)
4001 {
4002         struct btrfs_fs_info *fs_info = root->fs_info;
4003
4004         atomic_dec(&fs_info->scrub_pause_req);
4005         wake_up(&fs_info->scrub_pause_wait);
4006 }
4007
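/*
 * Cancel all running scrubs on this filesystem and wait until they have
 * finished.  Returns -ENOTCONN if no scrub was running.
 */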
4008 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4009 {
4010         mutex_lock(&fs_info->scrub_lock);
4011         if (!atomic_read(&fs_info->scrubs_running)) {
4012                 mutex_unlock(&fs_info->scrub_lock);
4013                 return -ENOTCONN;
4014         }
4015
4016         atomic_inc(&fs_info->scrub_cancel_req);
4017         while (atomic_read(&fs_info->scrubs_running)) {
4018                 mutex_unlock(&fs_info->scrub_lock);
4019                 wait_event(fs_info->scrub_pause_wait,
4020                            atomic_read(&fs_info->scrubs_running) == 0);
4021                 mutex_lock(&fs_info->scrub_lock);
4022         }
4023         atomic_dec(&fs_info->scrub_cancel_req);
4024         mutex_unlock(&fs_info->scrub_lock);
4025
4026         return 0;
4027 }
4028
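/*
 * Cancel the scrub running on a single device and wait until it has
 * detached from the device.  Returns -ENOTCONN if no scrub was running
 * on @dev.
 */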
4029 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
4030                            struct btrfs_device *dev)
4031 {
4032         struct scrub_ctx *sctx;
4033
4034         mutex_lock(&fs_info->scrub_lock);
4035         sctx = dev->scrub_device;
4036         if (!sctx) {
4037                 mutex_unlock(&fs_info->scrub_lock);
4038                 return -ENOTCONN;
4039         }
4040         atomic_inc(&sctx->cancel_req);
4041         while (dev->scrub_device) {
4042                 mutex_unlock(&fs_info->scrub_lock);
4043                 wait_event(fs_info->scrub_pause_wait,
4044                            dev->scrub_device == NULL);
4045                 mutex_lock(&fs_info->scrub_lock);
4046         }
4047         mutex_unlock(&fs_info->scrub_lock);
4048
4049         return 0;
4050 }
4051
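/*
 * Copy the statistics of the scrub running on @devid into @progress.
 * Returns -ENODEV if the device cannot be found and -ENOTCONN if no
 * scrub is running on it.
 */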
4052 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
4053                          struct btrfs_scrub_progress *progress)
4054 {
4055         struct btrfs_device *dev;
4056         struct scrub_ctx *sctx = NULL;
4057
4058         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
4059         dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
4060         if (dev)
4061                 sctx = dev->scrub_device;
4062         if (sctx)
4063                 memcpy(progress, &sctx->stat, sizeof(*progress));
4064         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
4065
4066         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4067 }
4068
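/*
 * Map a logical extent to the physical address, device and mirror number
 * of its first stripe.  If the mapping fails or does not cover the whole
 * extent, the output parameters are left untouched.
 */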
4069 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4070                                u64 extent_logical, u64 extent_len,
4071                                u64 *extent_physical,
4072                                struct btrfs_device **extent_dev,
4073                                int *extent_mirror_num)
4074 {
4075         u64 mapped_length;
4076         struct btrfs_bio *bbio = NULL;
4077         int ret;
4078
4079         mapped_length = extent_len;
4080         ret = btrfs_map_block(fs_info, READ, extent_logical,
4081                               &mapped_length, &bbio, 0);
4082         if (ret || !bbio || mapped_length < extent_len ||
4083             !bbio->stripes[0].dev->bdev) {
4084                 btrfs_put_bbio(bbio);
4085                 return;
4086         }
4087
4088         *extent_physical = bbio->stripes[0].physical;
4089         *extent_mirror_num = bbio->mirror_num;
4090         *extent_dev = bbio->stripes[0].dev;
4091         btrfs_put_bbio(bbio);
4092 }
4093
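/*
 * Set up the write context used to send repaired or copied data to the
 * dev-replace target.  For a regular scrub (no replace running) only the
 * lock is initialized and no target device is attached.
 */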
4094 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
4095                               struct scrub_wr_ctx *wr_ctx,
4096                               struct btrfs_fs_info *fs_info,
4097                               struct btrfs_device *dev,
4098                               int is_dev_replace)
4099 {
4100         WARN_ON(wr_ctx->wr_curr_bio != NULL);
4101
4102         mutex_init(&wr_ctx->wr_lock);
4103         wr_ctx->wr_curr_bio = NULL;
4104         if (!is_dev_replace)
4105                 return 0;
4106
4107         WARN_ON(!dev->bdev);
4108         wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
4109         wr_ctx->tgtdev = dev;
4110         atomic_set(&wr_ctx->flush_all_writes, 0);
4111         return 0;
4112 }
4113
4114 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
4115 {
4116         mutex_lock(&wr_ctx->wr_lock);
4117         kfree(wr_ctx->wr_curr_bio);
4118         wr_ctx->wr_curr_bio = NULL;
4119         mutex_unlock(&wr_ctx->wr_lock);
4120 }
4121
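/*
 * Queue a worker that copies a NOCOW extent to the dev-replace target.
 * NOCOW data can be overwritten in place, so the copy is done through
 * the page cache of the referencing inodes rather than by reading the
 * raw extent from the source device.
 */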
4122 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
4123                             int mirror_num, u64 physical_for_dev_replace)
4124 {
4125         struct scrub_copy_nocow_ctx *nocow_ctx;
4126         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
4127
4128         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
4129         if (!nocow_ctx) {
4130                 spin_lock(&sctx->stat_lock);
4131                 sctx->stat.malloc_errors++;
4132                 spin_unlock(&sctx->stat_lock);
4133                 return -ENOMEM;
4134         }
4135
4136         scrub_pending_trans_workers_inc(sctx);
4137
4138         nocow_ctx->sctx = sctx;
4139         nocow_ctx->logical = logical;
4140         nocow_ctx->len = len;
4141         nocow_ctx->mirror_num = mirror_num;
4142         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
4143         btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
4144                         copy_nocow_pages_worker, NULL, NULL);
4145         INIT_LIST_HEAD(&nocow_ctx->inodes);
4146         btrfs_queue_work(fs_info->scrub_nocow_workers,
4147                          &nocow_ctx->work);
4148
4149         return 0;
4150 }
4151
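/*
 * Backref walking callback: record every (inode, offset, root) that
 * references the NOCOW extent so the worker can copy it afterwards.
 */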
4152 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
4153 {
4154         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
4155         struct scrub_nocow_inode *nocow_inode;
4156
4157         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
4158         if (!nocow_inode)
4159                 return -ENOMEM;
4160         nocow_inode->inum = inum;
4161         nocow_inode->offset = offset;
4162         nocow_inode->root = root;
4163         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
4164         return 0;
4165 }
4166
4167 #define COPY_COMPLETE 1
4168
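/*
 * Worker for copy_nocow_pages(): find all inodes that reference the
 * extent and copy their pages to the replace target.  If nothing could
 * be written, the extent is accounted as an uncorrectable read error of
 * the replace operation.
 */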
4169 static void copy_nocow_pages_worker(struct btrfs_work *work)
4170 {
4171         struct scrub_copy_nocow_ctx *nocow_ctx =
4172                 container_of(work, struct scrub_copy_nocow_ctx, work);
4173         struct scrub_ctx *sctx = nocow_ctx->sctx;
4174         u64 logical = nocow_ctx->logical;
4175         u64 len = nocow_ctx->len;
4176         int mirror_num = nocow_ctx->mirror_num;
4177         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4178         int ret;
4179         struct btrfs_trans_handle *trans = NULL;
4180         struct btrfs_fs_info *fs_info;
4181         struct btrfs_path *path;
4182         struct btrfs_root *root;
4183         int not_written = 0;
4184
4185         fs_info = sctx->dev_root->fs_info;
4186         root = fs_info->extent_root;
4187
4188         path = btrfs_alloc_path();
4189         if (!path) {
4190                 spin_lock(&sctx->stat_lock);
4191                 sctx->stat.malloc_errors++;
4192                 spin_unlock(&sctx->stat_lock);
4193                 not_written = 1;
4194                 goto out;
4195         }
4196
4197         trans = btrfs_join_transaction(root);
4198         if (IS_ERR(trans)) {
4199                 not_written = 1;
4200                 goto out;
4201         }
4202
4203         ret = iterate_inodes_from_logical(logical, fs_info, path,
4204                                           record_inode_for_nocow, nocow_ctx);
4205         if (ret != 0 && ret != -ENOENT) {
4206                 btrfs_warn(fs_info,
4207                            "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
4208                            logical, physical_for_dev_replace, len, mirror_num,
4209                            ret);
4210                 not_written = 1;
4211                 goto out;
4212         }
4213
4214         btrfs_end_transaction(trans, root);
4215         trans = NULL;
4216         while (!list_empty(&nocow_ctx->inodes)) {
4217                 struct scrub_nocow_inode *entry;
4218                 entry = list_first_entry(&nocow_ctx->inodes,
4219                                          struct scrub_nocow_inode,
4220                                          list);
4221                 list_del_init(&entry->list);
4222                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4223                                                  entry->root, nocow_ctx);
4224                 kfree(entry);
4225                 if (ret == COPY_COMPLETE) {
4226                         ret = 0;
4227                         break;
4228                 } else if (ret) {
4229                         break;
4230                 }
4231         }
4232 out:
4233         while (!list_empty(&nocow_ctx->inodes)) {
4234                 struct scrub_nocow_inode *entry;
4235                 entry = list_first_entry(&nocow_ctx->inodes,
4236                                          struct scrub_nocow_inode,
4237                                          list);
4238                 list_del_init(&entry->list);
4239                 kfree(entry);
4240         }
4241         if (trans && !IS_ERR(trans))
4242                 btrfs_end_transaction(trans, root);
4243         if (not_written)
4244                 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4245                                             num_uncorrectable_read_errors);
4246
4247         btrfs_free_path(path);
4248         kfree(nocow_ctx);
4249
4250         scrub_pending_trans_workers_dec(sctx);
4251 }
4252
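/*
 * Check that the file range [start, start + len) still maps to the
 * logical extent being copied.  Returns 1 if an ordered extent is
 * pending or the mapping has changed (skip this inode), 0 if the range
 * can be copied, or a negative error.
 */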
4253 static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
4254                                  u64 logical)
4255 {
4256         struct extent_state *cached_state = NULL;
4257         struct btrfs_ordered_extent *ordered;
4258         struct extent_io_tree *io_tree;
4259         struct extent_map *em;
4260         u64 lockstart = start, lockend = start + len - 1;
4261         int ret = 0;
4262
4263         io_tree = &BTRFS_I(inode)->io_tree;
4264
4265         lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
4266         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4267         if (ordered) {
4268                 btrfs_put_ordered_extent(ordered);
4269                 ret = 1;
4270                 goto out_unlock;
4271         }
4272
4273         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4274         if (IS_ERR(em)) {
4275                 ret = PTR_ERR(em);
4276                 goto out_unlock;
4277         }
4278
4279         /*
4280          * This extent does not actually cover the logical extent
4281          * anymore; move on to the next inode.
4282          */
4283         if (em->block_start > logical ||
4284             em->block_start + em->block_len < logical + len) {
4285                 free_extent_map(em);
4286                 ret = 1;
4287                 goto out_unlock;
4288         }
4289         free_extent_map(em);
4290
4291 out_unlock:
4292         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4293                              GFP_NOFS);
4294         return ret;
4295 }
4296
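/*
 * Copy the pages of one inode that reference the NOCOW extent to the
 * replace target.  The pages are read through the page cache and the
 * file range is revalidated against the extent before every write.
 */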
4297 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4298                                       struct scrub_copy_nocow_ctx *nocow_ctx)
4299 {
4300         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
4301         struct btrfs_key key;
4302         struct inode *inode;
4303         struct page *page;
4304         struct btrfs_root *local_root;
4305         struct extent_io_tree *io_tree;
4306         u64 physical_for_dev_replace;
4307         u64 nocow_ctx_logical;
4308         u64 len = nocow_ctx->len;
4309         unsigned long index;
4310         int srcu_index;
4311         int ret = 0;
4312         int err = 0;
4313
4314         key.objectid = root;
4315         key.type = BTRFS_ROOT_ITEM_KEY;
4316         key.offset = (u64)-1;
4317
4318         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4319
4320         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4321         if (IS_ERR(local_root)) {
4322                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4323                 return PTR_ERR(local_root);
4324         }
4325
4326         key.type = BTRFS_INODE_ITEM_KEY;
4327         key.objectid = inum;
4328         key.offset = 0;
4329         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4330         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4331         if (IS_ERR(inode))
4332                 return PTR_ERR(inode);
4333
4334         /* Avoid racing with truncate/dio/punch hole */
4335         inode_lock(inode);
4336         inode_dio_wait(inode);
4337
4338         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4339         io_tree = &BTRFS_I(inode)->io_tree;
4340         nocow_ctx_logical = nocow_ctx->logical;
4341
4342         ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
4343         if (ret) {
4344                 ret = ret > 0 ? 0 : ret;
4345                 goto out;
4346         }
4347
4348         while (len >= PAGE_SIZE) {
4349                 index = offset >> PAGE_SHIFT;
4350 again:
4351                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4352                 if (!page) {
4353                         btrfs_err(fs_info, "find_or_create_page() failed");
4354                         ret = -ENOMEM;
4355                         goto out;
4356                 }
4357
4358                 if (PageUptodate(page)) {
4359                         if (PageDirty(page))
4360                                 goto next_page;
4361                 } else {
4362                         ClearPageError(page);
4363                         err = extent_read_full_page(io_tree, page,
4364                                                            btrfs_get_extent,
4365                                                            nocow_ctx->mirror_num);
4366                         if (err) {
4367                                 ret = err;
4368                                 goto next_page;
4369                         }
4370
4371                         lock_page(page);
4372                         /*
4373                          * If the page has been removed from the page cache,
4374                          * the data on it is meaningless: it may be stale,
4375                          * and the new data may have been written to a new
4376                          * page in the page cache.
4377                          */
4378                         if (page->mapping != inode->i_mapping) {
4379                                 unlock_page(page);
4380                                 put_page(page);
4381                                 goto again;
4382                         }
4383                         if (!PageUptodate(page)) {
4384                                 ret = -EIO;
4385                                 goto next_page;
4386                         }
4387                 }
4388
4389                 ret = check_extent_to_block(inode, offset, len,
4390                                             nocow_ctx_logical);
4391                 if (ret) {
4392                         ret = ret > 0 ? 0 : ret;
4393                         goto next_page;
4394                 }
4395
4396                 err = write_page_nocow(nocow_ctx->sctx,
4397                                        physical_for_dev_replace, page);
4398                 if (err)
4399                         ret = err;
4400 next_page:
4401                 unlock_page(page);
4402                 put_page(page);
4403
4404                 if (ret)
4405                         break;
4406
4407                 offset += PAGE_SIZE;
4408                 physical_for_dev_replace += PAGE_SIZE;
4409                 nocow_ctx_logical += PAGE_SIZE;
4410                 len -= PAGE_SIZE;
4411         }
4412         ret = COPY_COMPLETE;
4413 out:
4414         inode_unlock(inode);
4415         iput(inode);
4416         return ret;
4417 }
4418
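/*
 * Synchronously write a single page to the given physical offset on the
 * dev-replace target device.  Write failures are added to the device
 * statistics and returned as -EIO.
 */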
4419 static int write_page_nocow(struct scrub_ctx *sctx,
4420                             u64 physical_for_dev_replace, struct page *page)
4421 {
4422         struct bio *bio;
4423         struct btrfs_device *dev;
4424         int ret;
4425
4426         dev = sctx->wr_ctx.tgtdev;
4427         if (!dev)
4428                 return -EIO;
4429         if (!dev->bdev) {
4430                 btrfs_warn_rl(dev->dev_root->fs_info,
4431                         "scrub write_page_nocow(bdev == NULL) is unexpected");
4432                 return -EIO;
4433         }
4434         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
4435         if (!bio) {
4436                 spin_lock(&sctx->stat_lock);
4437                 sctx->stat.malloc_errors++;
4438                 spin_unlock(&sctx->stat_lock);
4439                 return -ENOMEM;
4440         }
4441         bio->bi_iter.bi_size = 0;
4442         bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4443         bio->bi_bdev = dev->bdev;
4444         bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
4445         ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4446         if (ret != PAGE_SIZE) {
4447 leave_with_eio:
4448                 bio_put(bio);
4449                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4450                 return -EIO;
4451         }
4452
4453         if (btrfsic_submit_bio_wait(bio))
4454                 goto leave_with_eio;
4455
4456         bio_put(bio);
4457         return 0;
4458 }