GNU Linux-libre 5.10.219-gnu1
[releases.git] / fs / btrfs / volumes.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5
6 #include <linux/sched.h>
7 #include <linux/sched/mm.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/ratelimit.h>
12 #include <linux/kthread.h>
13 #include <linux/raid/pq.h>
14 #include <linux/semaphore.h>
15 #include <linux/uuid.h>
16 #include <linux/list_sort.h>
17 #include <linux/namei.h>
18 #include "misc.h"
19 #include "ctree.h"
20 #include "extent_map.h"
21 #include "disk-io.h"
22 #include "transaction.h"
23 #include "print-tree.h"
24 #include "volumes.h"
25 #include "raid56.h"
26 #include "async-thread.h"
27 #include "check-integrity.h"
28 #include "rcu-string.h"
29 #include "dev-replace.h"
30 #include "sysfs.h"
31 #include "tree-checker.h"
32 #include "space-info.h"
33 #include "block-group.h"
34 #include "discard.h"
35
36 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
37         [BTRFS_RAID_RAID10] = {
38                 .sub_stripes    = 2,
39                 .dev_stripes    = 1,
40                 .devs_max       = 0,    /* 0 == as many as possible */
41                 .devs_min       = 4,
42                 .tolerated_failures = 1,
43                 .devs_increment = 2,
44                 .ncopies        = 2,
45                 .nparity        = 0,
46                 .raid_name      = "raid10",
47                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID10,
48                 .mindev_error   = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
49         },
50         [BTRFS_RAID_RAID1] = {
51                 .sub_stripes    = 1,
52                 .dev_stripes    = 1,
53                 .devs_max       = 2,
54                 .devs_min       = 2,
55                 .tolerated_failures = 1,
56                 .devs_increment = 2,
57                 .ncopies        = 2,
58                 .nparity        = 0,
59                 .raid_name      = "raid1",
60                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1,
61                 .mindev_error   = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
62         },
63         [BTRFS_RAID_RAID1C3] = {
64                 .sub_stripes    = 1,
65                 .dev_stripes    = 1,
66                 .devs_max       = 3,
67                 .devs_min       = 3,
68                 .tolerated_failures = 2,
69                 .devs_increment = 3,
70                 .ncopies        = 3,
71                 .nparity        = 0,
72                 .raid_name      = "raid1c3",
73                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1C3,
74                 .mindev_error   = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
75         },
76         [BTRFS_RAID_RAID1C4] = {
77                 .sub_stripes    = 1,
78                 .dev_stripes    = 1,
79                 .devs_max       = 4,
80                 .devs_min       = 4,
81                 .tolerated_failures = 3,
82                 .devs_increment = 4,
83                 .ncopies        = 4,
84                 .nparity        = 0,
85                 .raid_name      = "raid1c4",
86                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1C4,
87                 .mindev_error   = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
88         },
89         [BTRFS_RAID_DUP] = {
90                 .sub_stripes    = 1,
91                 .dev_stripes    = 2,
92                 .devs_max       = 1,
93                 .devs_min       = 1,
94                 .tolerated_failures = 0,
95                 .devs_increment = 1,
96                 .ncopies        = 2,
97                 .nparity        = 0,
98                 .raid_name      = "dup",
99                 .bg_flag        = BTRFS_BLOCK_GROUP_DUP,
100                 .mindev_error   = 0,
101         },
102         [BTRFS_RAID_RAID0] = {
103                 .sub_stripes    = 1,
104                 .dev_stripes    = 1,
105                 .devs_max       = 0,
106                 .devs_min       = 2,
107                 .tolerated_failures = 0,
108                 .devs_increment = 1,
109                 .ncopies        = 1,
110                 .nparity        = 0,
111                 .raid_name      = "raid0",
112                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID0,
113                 .mindev_error   = 0,
114         },
115         [BTRFS_RAID_SINGLE] = {
116                 .sub_stripes    = 1,
117                 .dev_stripes    = 1,
118                 .devs_max       = 1,
119                 .devs_min       = 1,
120                 .tolerated_failures = 0,
121                 .devs_increment = 1,
122                 .ncopies        = 1,
123                 .nparity        = 0,
124                 .raid_name      = "single",
125                 .bg_flag        = 0,
126                 .mindev_error   = 0,
127         },
128         [BTRFS_RAID_RAID5] = {
129                 .sub_stripes    = 1,
130                 .dev_stripes    = 1,
131                 .devs_max       = 0,
132                 .devs_min       = 2,
133                 .tolerated_failures = 1,
134                 .devs_increment = 1,
135                 .ncopies        = 1,
136                 .nparity        = 1,
137                 .raid_name      = "raid5",
138                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID5,
139                 .mindev_error   = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
140         },
141         [BTRFS_RAID_RAID6] = {
142                 .sub_stripes    = 1,
143                 .dev_stripes    = 1,
144                 .devs_max       = 0,
145                 .devs_min       = 3,
146                 .tolerated_failures = 2,
147                 .devs_increment = 1,
148                 .ncopies        = 1,
149                 .nparity        = 2,
150                 .raid_name      = "raid6",
151                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID6,
152                 .mindev_error   = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
153         },
154 };
155
156 const char *btrfs_bg_type_to_raid_name(u64 flags)
157 {
158         const int index = btrfs_bg_flags_to_raid_index(flags);
159
160         if (index >= BTRFS_NR_RAID_TYPES)
161                 return NULL;
162
163         return btrfs_raid_array[index].raid_name;
164 }
165
166 /*
167  * Fill @buf with textual description of @bg_flags, no more than @size_buf
168  * bytes including terminating null byte.
169  */
170 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
171 {
172         int i;
173         int ret;
174         char *bp = buf;
175         u64 flags = bg_flags;
176         u32 size_bp = size_buf;
177
178         if (!flags) {
179                 strcpy(bp, "NONE");
180                 return;
181         }
182
183 #define DESCRIBE_FLAG(flag, desc)                                               \
184         do {                                                            \
185                 if (flags & (flag)) {                                   \
186                         ret = snprintf(bp, size_bp, "%s|", (desc));     \
187                         if (ret < 0 || ret >= size_bp)                  \
188                                 goto out_overflow;                      \
189                         size_bp -= ret;                                 \
190                         bp += ret;                                      \
191                         flags &= ~(flag);                               \
192                 }                                                       \
193         } while (0)
194
195         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
196         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
197         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
198
199         DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
200         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
201                 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
202                               btrfs_raid_array[i].raid_name);
203 #undef DESCRIBE_FLAG
204
205         if (flags) {
206                 ret = snprintf(bp, size_bp, "0x%llx|", flags);
207                 size_bp -= ret;
208         }
209
210         if (size_bp < size_buf)
211                 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
212
213         /*
214          * The text is trimmed, it's up to the caller to provide sufficiently
215          * large buffer
216          */
217 out_overflow:;
218 }
219
220 static int init_first_rw_device(struct btrfs_trans_handle *trans);
221 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
222 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
223 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
224 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
225                              enum btrfs_map_op op,
226                              u64 logical, u64 *length,
227                              struct btrfs_bio **bbio_ret,
228                              int mirror_num, int need_raid_map);
229
230 /*
231  * Device locking
232  * ==============
233  *
234  * There are several mutexes that protect manipulation of devices and low-level
235  * structures like chunks but not block groups, extents or files
236  *
237  * uuid_mutex (global lock)
238  * ------------------------
239  * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
240  * the SCAN_DEV ioctl registration or from mount either implicitly (the first
241  * device) or requested by the device= mount option
242  *
243  * the mutex can be very coarse and can cover long-running operations
244  *
245  * protects: updates to fs_devices counters like missing devices, rw devices,
246  * seeding, structure cloning, opening/closing devices at mount/umount time
247  *
248  * global::fs_devs - add, remove, updates to the global list
249  *
250  * does not protect: manipulation of the fs_devices::devices list in general
251  * but in mount context it could be used to exclude list modifications by eg.
252  * scan ioctl
253  *
254  * btrfs_device::name - renames (write side), read is RCU
255  *
256  * fs_devices::device_list_mutex (per-fs, with RCU)
257  * ------------------------------------------------
258  * protects updates to fs_devices::devices, ie. adding and deleting
259  *
260  * simple list traversal with read-only actions can be done with RCU protection
261  *
262  * may be used to exclude some operations from running concurrently without any
263  * modifications to the list (see write_all_supers)
264  *
265  * Is not required at mount and close times, because our device list is
266  * protected by the uuid_mutex at that point.
267  *
268  * balance_mutex
269  * -------------
270  * protects balance structures (status, state) and context accessed from
271  * several places (internally, ioctl)
272  *
273  * chunk_mutex
274  * -----------
275  * protects chunks, adding or removing during allocation, trim or when a new
276  * device is added/removed. Additionally it also protects post_commit_list of
277  * individual devices, since they can be added to the transaction's
278  * post_commit_list only with chunk_mutex held.
279  *
280  * cleaner_mutex
281  * -------------
282  * a big lock that is held by the cleaner thread and prevents running subvolume
283  * cleaning together with relocation or delayed iputs
284  *
285  *
286  * Lock nesting
287  * ============
288  *
289  * uuid_mutex
290  *   device_list_mutex
291  *     chunk_mutex
292  *   balance_mutex
293  *
294  *
295  * Exclusive operations
296  * ====================
297  *
298  * Maintains the exclusivity of the following operations that apply to the
299  * whole filesystem and cannot run in parallel.
300  *
301  * - Balance (*)
302  * - Device add
303  * - Device remove
304  * - Device replace (*)
305  * - Resize
306  *
307  * The device operations (as above) can be in one of the following states:
308  *
309  * - Running state
310  * - Paused state
311  * - Completed state
312  *
313  * Only device operations marked with (*) can go into the Paused state for the
314  * following reasons:
315  *
316  * - ioctl (only Balance can be Paused through ioctl)
317  * - filesystem remounted as read-only
318  * - filesystem unmounted and mounted as read-only
319  * - system power-cycle and filesystem mounted as read-only
320  * - filesystem or device errors leading to forced read-only
321  *
322  * The status of exclusive operation is set and cleared atomically.
323  * During the course of Paused state, fs_info::exclusive_operation remains set.
324  * A device operation in Paused or Running state can be canceled or resumed
325  * either by ioctl (Balance only) or when remounted as read-write.
326  * The exclusive status is cleared when the device operation is canceled or
327  * completed.
328  */
329
330 DEFINE_MUTEX(uuid_mutex);
331 static LIST_HEAD(fs_uuids);
332 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
333 {
334         return &fs_uuids;
335 }
336
337 /*
338  * alloc_fs_devices - allocate struct btrfs_fs_devices
339  * @fsid:               if not NULL, copy the UUID to fs_devices::fsid
340  * @metadata_fsid:      if not NULL, copy the UUID to fs_devices::metadata_fsid
341  *
342  * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
343  * The returned struct is not linked onto any lists and can be destroyed with
344  * kfree() right away.
345  */
346 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
347                                                  const u8 *metadata_fsid)
348 {
349         struct btrfs_fs_devices *fs_devs;
350
351         fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
352         if (!fs_devs)
353                 return ERR_PTR(-ENOMEM);
354
355         mutex_init(&fs_devs->device_list_mutex);
356
357         INIT_LIST_HEAD(&fs_devs->devices);
358         INIT_LIST_HEAD(&fs_devs->alloc_list);
359         INIT_LIST_HEAD(&fs_devs->fs_list);
360         INIT_LIST_HEAD(&fs_devs->seed_list);
361         if (fsid)
362                 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
363
364         if (metadata_fsid)
365                 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
366         else if (fsid)
367                 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
368
369         return fs_devs;
370 }
371
372 void btrfs_free_device(struct btrfs_device *device)
373 {
374         WARN_ON(!list_empty(&device->post_commit_list));
375         rcu_string_free(device->name);
376         extent_io_tree_release(&device->alloc_state);
377         bio_put(device->flush_bio);
378         kfree(device);
379 }
380
381 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
382 {
383         struct btrfs_device *device;
384
385         WARN_ON(fs_devices->opened);
386         while (!list_empty(&fs_devices->devices)) {
387                 device = list_entry(fs_devices->devices.next,
388                                     struct btrfs_device, dev_list);
389                 list_del(&device->dev_list);
390                 btrfs_free_device(device);
391         }
392         kfree(fs_devices);
393 }
394
395 void __exit btrfs_cleanup_fs_uuids(void)
396 {
397         struct btrfs_fs_devices *fs_devices;
398
399         while (!list_empty(&fs_uuids)) {
400                 fs_devices = list_entry(fs_uuids.next,
401                                         struct btrfs_fs_devices, fs_list);
402                 list_del(&fs_devices->fs_list);
403                 free_fs_devices(fs_devices);
404         }
405 }
406
407 /*
408  * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
409  * Returned struct is not linked onto any lists and must be destroyed using
410  * btrfs_free_device.
411  */
412 static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
413 {
414         struct btrfs_device *dev;
415
416         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
417         if (!dev)
418                 return ERR_PTR(-ENOMEM);
419
420         /*
421          * Preallocate a bio that's always going to be used for flushing device
422          * barriers and matches the device lifespan
423          */
424         dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
425         if (!dev->flush_bio) {
426                 kfree(dev);
427                 return ERR_PTR(-ENOMEM);
428         }
429
430         INIT_LIST_HEAD(&dev->dev_list);
431         INIT_LIST_HEAD(&dev->dev_alloc_list);
432         INIT_LIST_HEAD(&dev->post_commit_list);
433
434         atomic_set(&dev->reada_in_flight, 0);
435         atomic_set(&dev->dev_stats_ccnt, 0);
436         btrfs_device_data_ordered_init(dev);
437         INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
438         INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
439         extent_io_tree_init(fs_info, &dev->alloc_state,
440                             IO_TREE_DEVICE_ALLOC_STATE, NULL);
441
442         return dev;
443 }
444
445 static noinline struct btrfs_fs_devices *find_fsid(
446                 const u8 *fsid, const u8 *metadata_fsid)
447 {
448         struct btrfs_fs_devices *fs_devices;
449
450         ASSERT(fsid);
451
452         /* Handle non-split brain cases */
453         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
454                 if (metadata_fsid) {
455                         if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
456                             && memcmp(metadata_fsid, fs_devices->metadata_uuid,
457                                       BTRFS_FSID_SIZE) == 0)
458                                 return fs_devices;
459                 } else {
460                         if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
461                                 return fs_devices;
462                 }
463         }
464         return NULL;
465 }
466
467 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
468                                 struct btrfs_super_block *disk_super)
469 {
470
471         struct btrfs_fs_devices *fs_devices;
472
473         /*
474          * Handle scanned device having completed its fsid change but
475          * belonging to a fs_devices that was created by first scanning
476          * a device which didn't have its fsid/metadata_uuid changed
477          * at all and the CHANGING_FSID_V2 flag set.
478          */
479         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
480                 if (fs_devices->fsid_change &&
481                     memcmp(disk_super->metadata_uuid, fs_devices->fsid,
482                            BTRFS_FSID_SIZE) == 0 &&
483                     memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
484                            BTRFS_FSID_SIZE) == 0) {
485                         return fs_devices;
486                 }
487         }
488         /*
489          * Handle scanned device having completed its fsid change but
490          * belonging to a fs_devices that was created by a device that
491          * has an outdated pair of fsid/metadata_uuid and
492          * CHANGING_FSID_V2 flag set.
493          */
494         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
495                 if (fs_devices->fsid_change &&
496                     memcmp(fs_devices->metadata_uuid,
497                            fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
498                     memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
499                            BTRFS_FSID_SIZE) == 0) {
500                         return fs_devices;
501                 }
502         }
503
504         return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
505 }
506
507
508 static int
509 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
510                       int flush, struct block_device **bdev,
511                       struct btrfs_super_block **disk_super)
512 {
513         int ret;
514
515         *bdev = blkdev_get_by_path(device_path, flags, holder);
516
517         if (IS_ERR(*bdev)) {
518                 ret = PTR_ERR(*bdev);
519                 goto error;
520         }
521
522         if (flush)
523                 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
524         ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
525         if (ret) {
526                 blkdev_put(*bdev, flags);
527                 goto error;
528         }
529         invalidate_bdev(*bdev);
530         *disk_super = btrfs_read_dev_super(*bdev);
531         if (IS_ERR(*disk_super)) {
532                 ret = PTR_ERR(*disk_super);
533                 blkdev_put(*bdev, flags);
534                 goto error;
535         }
536
537         return 0;
538
539 error:
540         *bdev = NULL;
541         return ret;
542 }
543
544 /*
545  * Check if the device in the path matches the device in the given struct device.
546  *
547  * Returns:
548  *   true  If it is the same device.
549  *   false If it is not the same device or on error.
550  */
551 static bool device_matched(const struct btrfs_device *device, const char *path)
552 {
553         char *device_name;
554         struct block_device *bdev_old;
555         struct block_device *bdev_new;
556
557         /*
558          * If we are looking for a device with the matching dev_t, then skip
559          * device without a name (a missing device).
560          */
561         if (!device->name)
562                 return false;
563
564         device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
565         if (!device_name)
566                 return false;
567
568         rcu_read_lock();
569         scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
570         rcu_read_unlock();
571
572         bdev_old = lookup_bdev(device_name);
573         kfree(device_name);
574         if (IS_ERR(bdev_old))
575                 return false;
576
577         bdev_new = lookup_bdev(path);
578         if (IS_ERR(bdev_new))
579                 return false;
580
581         if (bdev_old == bdev_new)
582                 return true;
583
584         return false;
585 }
586
587 /*
588  *  Search and remove all stale (devices which are not mounted) devices.
589  *  When both inputs are NULL, it will search and release all stale devices.
590  *  path:       Optional. When provided will it release all unmounted devices
591  *              matching this path only.
592  *  skip_dev:   Optional. Will skip this device when searching for the stale
593  *              devices.
594  *  Return:     0 for success or if @path is NULL.
595  *              -EBUSY if @path is a mounted device.
596  *              -ENOENT if @path does not match any device in the list.
597  */
598 static int btrfs_free_stale_devices(const char *path,
599                                      struct btrfs_device *skip_device)
600 {
601         struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
602         struct btrfs_device *device, *tmp_device;
603         int ret = 0;
604
605         lockdep_assert_held(&uuid_mutex);
606
607         if (path)
608                 ret = -ENOENT;
609
610         list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
611
612                 mutex_lock(&fs_devices->device_list_mutex);
613                 list_for_each_entry_safe(device, tmp_device,
614                                          &fs_devices->devices, dev_list) {
615                         if (skip_device && skip_device == device)
616                                 continue;
617                         if (path && !device_matched(device, path))
618                                 continue;
619                         if (fs_devices->opened) {
620                                 /* for an already deleted device return 0 */
621                                 if (path && ret != 0)
622                                         ret = -EBUSY;
623                                 break;
624                         }
625
626                         /* delete the stale device */
627                         fs_devices->num_devices--;
628                         list_del(&device->dev_list);
629                         btrfs_free_device(device);
630
631                         ret = 0;
632                 }
633                 mutex_unlock(&fs_devices->device_list_mutex);
634
635                 if (fs_devices->num_devices == 0) {
636                         btrfs_sysfs_remove_fsid(fs_devices);
637                         list_del(&fs_devices->fs_list);
638                         free_fs_devices(fs_devices);
639                 }
640         }
641
642         return ret;
643 }
644
645 /*
646  * This is only used on mount, and we are protected from competing things
647  * messing with our fs_devices by the uuid_mutex, thus we do not need the
648  * fs_devices->device_list_mutex here.
649  */
650 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
651                         struct btrfs_device *device, fmode_t flags,
652                         void *holder)
653 {
654         struct request_queue *q;
655         struct block_device *bdev;
656         struct btrfs_super_block *disk_super;
657         u64 devid;
658         int ret;
659
660         if (device->bdev)
661                 return -EINVAL;
662         if (!device->name)
663                 return -EINVAL;
664
665         ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
666                                     &bdev, &disk_super);
667         if (ret)
668                 return ret;
669
670         devid = btrfs_stack_device_id(&disk_super->dev_item);
671         if (devid != device->devid)
672                 goto error_free_page;
673
674         if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
675                 goto error_free_page;
676
677         device->generation = btrfs_super_generation(disk_super);
678
679         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
680                 if (btrfs_super_incompat_flags(disk_super) &
681                     BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
682                         pr_err(
683                 "BTRFS: Invalid seeding and uuid-changed device detected\n");
684                         goto error_free_page;
685                 }
686
687                 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
688                 fs_devices->seeding = true;
689         } else {
690                 if (bdev_read_only(bdev))
691                         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
692                 else
693                         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
694         }
695
696         q = bdev_get_queue(bdev);
697         if (!blk_queue_nonrot(q))
698                 fs_devices->rotating = true;
699
700         device->bdev = bdev;
701         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
702         device->mode = flags;
703
704         fs_devices->open_devices++;
705         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
706             device->devid != BTRFS_DEV_REPLACE_DEVID) {
707                 fs_devices->rw_devices++;
708                 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
709         }
710         btrfs_release_disk_super(disk_super);
711
712         return 0;
713
714 error_free_page:
715         btrfs_release_disk_super(disk_super);
716         blkdev_put(bdev, flags);
717
718         return -EINVAL;
719 }
720
721 u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
722 {
723         bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
724                                   BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
725
726         return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
727 }
728
729 /*
730  * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
731  * being created with a disk that has already completed its fsid change. Such
732  * disk can belong to an fs which has its FSID changed or to one which doesn't.
733  * Handle both cases here.
734  */
735 static struct btrfs_fs_devices *find_fsid_inprogress(
736                                         struct btrfs_super_block *disk_super)
737 {
738         struct btrfs_fs_devices *fs_devices;
739
740         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
741                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
742                            BTRFS_FSID_SIZE) != 0 &&
743                     memcmp(fs_devices->metadata_uuid, disk_super->fsid,
744                            BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
745                         return fs_devices;
746                 }
747         }
748
749         return find_fsid(disk_super->fsid, NULL);
750 }
751
752
753 static struct btrfs_fs_devices *find_fsid_changed(
754                                         struct btrfs_super_block *disk_super)
755 {
756         struct btrfs_fs_devices *fs_devices;
757
758         /*
759          * Handles the case where scanned device is part of an fs that had
760          * multiple successful changes of FSID but curently device didn't
761          * observe it. Meaning our fsid will be different than theirs. We need
762          * to handle two subcases :
763          *  1 - The fs still continues to have different METADATA/FSID uuids.
764          *  2 - The fs is switched back to its original FSID (METADATA/FSID
765          *  are equal).
766          */
767         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
768                 /* Changed UUIDs */
769                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
770                            BTRFS_FSID_SIZE) != 0 &&
771                     memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
772                            BTRFS_FSID_SIZE) == 0 &&
773                     memcmp(fs_devices->fsid, disk_super->fsid,
774                            BTRFS_FSID_SIZE) != 0)
775                         return fs_devices;
776
777                 /* Unchanged UUIDs */
778                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
779                            BTRFS_FSID_SIZE) == 0 &&
780                     memcmp(fs_devices->fsid, disk_super->metadata_uuid,
781                            BTRFS_FSID_SIZE) == 0)
782                         return fs_devices;
783         }
784
785         return NULL;
786 }
787
788 static struct btrfs_fs_devices *find_fsid_reverted_metadata(
789                                 struct btrfs_super_block *disk_super)
790 {
791         struct btrfs_fs_devices *fs_devices;
792
793         /*
794          * Handle the case where the scanned device is part of an fs whose last
795          * metadata UUID change reverted it to the original FSID. At the same
796          * time * fs_devices was first created by another constitutent device
797          * which didn't fully observe the operation. This results in an
798          * btrfs_fs_devices created with metadata/fsid different AND
799          * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
800          * fs_devices equal to the FSID of the disk.
801          */
802         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
803                 if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
804                            BTRFS_FSID_SIZE) != 0 &&
805                     memcmp(fs_devices->metadata_uuid, disk_super->fsid,
806                            BTRFS_FSID_SIZE) == 0 &&
807                     fs_devices->fsid_change)
808                         return fs_devices;
809         }
810
811         return NULL;
812 }
813 /*
814  * Add new device to list of registered devices
815  *
816  * Returns:
817  * device pointer which was just added or updated when successful
818  * error pointer when failed
819  */
820 static noinline struct btrfs_device *device_list_add(const char *path,
821                            struct btrfs_super_block *disk_super,
822                            bool *new_device_added)
823 {
824         struct btrfs_device *device;
825         struct btrfs_fs_devices *fs_devices = NULL;
826         struct rcu_string *name;
827         u64 found_transid = btrfs_super_generation(disk_super);
828         u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
829         bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
830                 BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
831         bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
832                                         BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
833
834         if (fsid_change_in_progress) {
835                 if (!has_metadata_uuid)
836                         fs_devices = find_fsid_inprogress(disk_super);
837                 else
838                         fs_devices = find_fsid_changed(disk_super);
839         } else if (has_metadata_uuid) {
840                 fs_devices = find_fsid_with_metadata_uuid(disk_super);
841         } else {
842                 fs_devices = find_fsid_reverted_metadata(disk_super);
843                 if (!fs_devices)
844                         fs_devices = find_fsid(disk_super->fsid, NULL);
845         }
846
847
848         if (!fs_devices) {
849                 if (has_metadata_uuid)
850                         fs_devices = alloc_fs_devices(disk_super->fsid,
851                                                       disk_super->metadata_uuid);
852                 else
853                         fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
854
855                 if (IS_ERR(fs_devices))
856                         return ERR_CAST(fs_devices);
857
858                 fs_devices->fsid_change = fsid_change_in_progress;
859
860                 mutex_lock(&fs_devices->device_list_mutex);
861                 list_add(&fs_devices->fs_list, &fs_uuids);
862
863                 device = NULL;
864         } else {
865                 mutex_lock(&fs_devices->device_list_mutex);
866                 device = btrfs_find_device(fs_devices, devid,
867                                 disk_super->dev_item.uuid, NULL, false);
868
869                 /*
870                  * If this disk has been pulled into an fs devices created by
871                  * a device which had the CHANGING_FSID_V2 flag then replace the
872                  * metadata_uuid/fsid values of the fs_devices.
873                  */
874                 if (fs_devices->fsid_change &&
875                     found_transid > fs_devices->latest_generation) {
876                         memcpy(fs_devices->fsid, disk_super->fsid,
877                                         BTRFS_FSID_SIZE);
878
879                         if (has_metadata_uuid)
880                                 memcpy(fs_devices->metadata_uuid,
881                                        disk_super->metadata_uuid,
882                                        BTRFS_FSID_SIZE);
883                         else
884                                 memcpy(fs_devices->metadata_uuid,
885                                        disk_super->fsid, BTRFS_FSID_SIZE);
886
887                         fs_devices->fsid_change = false;
888                 }
889         }
890
891         if (!device) {
892                 if (fs_devices->opened) {
893                         mutex_unlock(&fs_devices->device_list_mutex);
894                         return ERR_PTR(-EBUSY);
895                 }
896
897                 device = btrfs_alloc_device(NULL, &devid,
898                                             disk_super->dev_item.uuid);
899                 if (IS_ERR(device)) {
900                         mutex_unlock(&fs_devices->device_list_mutex);
901                         /* we can safely leave the fs_devices entry around */
902                         return device;
903                 }
904
905                 name = rcu_string_strdup(path, GFP_NOFS);
906                 if (!name) {
907                         btrfs_free_device(device);
908                         mutex_unlock(&fs_devices->device_list_mutex);
909                         return ERR_PTR(-ENOMEM);
910                 }
911                 rcu_assign_pointer(device->name, name);
912
913                 list_add_rcu(&device->dev_list, &fs_devices->devices);
914                 fs_devices->num_devices++;
915
916                 device->fs_devices = fs_devices;
917                 *new_device_added = true;
918
919                 if (disk_super->label[0])
920                         pr_info(
921         "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
922                                 disk_super->label, devid, found_transid, path,
923                                 current->comm, task_pid_nr(current));
924                 else
925                         pr_info(
926         "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
927                                 disk_super->fsid, devid, found_transid, path,
928                                 current->comm, task_pid_nr(current));
929
930         } else if (!device->name || strcmp(device->name->str, path)) {
931                 /*
932                  * When FS is already mounted.
933                  * 1. If you are here and if the device->name is NULL that
934                  *    means this device was missing at time of FS mount.
935                  * 2. If you are here and if the device->name is different
936                  *    from 'path' that means either
937                  *      a. The same device disappeared and reappeared with
938                  *         different name. or
939                  *      b. The missing-disk-which-was-replaced, has
940                  *         reappeared now.
941                  *
942                  * We must allow 1 and 2a above. But 2b would be a spurious
943                  * and unintentional.
944                  *
945                  * Further in case of 1 and 2a above, the disk at 'path'
946                  * would have missed some transaction when it was away and
947                  * in case of 2a the stale bdev has to be updated as well.
948                  * 2b must not be allowed at all time.
949                  */
950
951                 /*
952                  * For now, we do allow update to btrfs_fs_device through the
953                  * btrfs dev scan cli after FS has been mounted.  We're still
954                  * tracking a problem where systems fail mount by subvolume id
955                  * when we reject replacement on a mounted FS.
956                  */
957                 if (!fs_devices->opened && found_transid < device->generation) {
958                         /*
959                          * That is if the FS is _not_ mounted and if you
960                          * are here, that means there is more than one
961                          * disk with same uuid and devid.We keep the one
962                          * with larger generation number or the last-in if
963                          * generation are equal.
964                          */
965                         mutex_unlock(&fs_devices->device_list_mutex);
966                         return ERR_PTR(-EEXIST);
967                 }
968
969                 /*
970                  * We are going to replace the device path for a given devid,
971                  * make sure it's the same device if the device is mounted
972                  */
973                 if (device->bdev) {
974                         struct block_device *path_bdev;
975
976                         path_bdev = lookup_bdev(path);
977                         if (IS_ERR(path_bdev)) {
978                                 mutex_unlock(&fs_devices->device_list_mutex);
979                                 return ERR_CAST(path_bdev);
980                         }
981
982                         if (device->bdev != path_bdev) {
983                                 bdput(path_bdev);
984                                 mutex_unlock(&fs_devices->device_list_mutex);
985                                 /*
986                                  * device->fs_info may not be reliable here, so
987                                  * pass in a NULL instead. This avoids a
988                                  * possible use-after-free when the fs_info and
989                                  * fs_info->sb are already torn down.
990                                  */
991                                 btrfs_warn_in_rcu(NULL,
992         "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
993                                                   path, devid, found_transid,
994                                                   current->comm,
995                                                   task_pid_nr(current));
996                                 return ERR_PTR(-EEXIST);
997                         }
998                         bdput(path_bdev);
999                         btrfs_info_in_rcu(device->fs_info,
1000         "devid %llu device path %s changed to %s scanned by %s (%d)",
1001                                           devid, rcu_str_deref(device->name),
1002                                           path, current->comm,
1003                                           task_pid_nr(current));
1004                 }
1005
1006                 name = rcu_string_strdup(path, GFP_NOFS);
1007                 if (!name) {
1008                         mutex_unlock(&fs_devices->device_list_mutex);
1009                         return ERR_PTR(-ENOMEM);
1010                 }
1011                 rcu_string_free(device->name);
1012                 rcu_assign_pointer(device->name, name);
1013                 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
1014                         fs_devices->missing_devices--;
1015                         clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
1016                 }
1017         }
1018
1019         /*
1020          * Unmount does not free the btrfs_device struct but would zero
1021          * generation along with most of the other members. So just update
1022          * it back. We need it to pick the disk with largest generation
1023          * (as above).
1024          */
1025         if (!fs_devices->opened) {
1026                 device->generation = found_transid;
1027                 fs_devices->latest_generation = max_t(u64, found_transid,
1028                                                 fs_devices->latest_generation);
1029         }
1030
1031         fs_devices->total_devices = btrfs_super_num_devices(disk_super);
1032
1033         mutex_unlock(&fs_devices->device_list_mutex);
1034         return device;
1035 }
1036
1037 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
1038 {
1039         struct btrfs_fs_devices *fs_devices;
1040         struct btrfs_device *device;
1041         struct btrfs_device *orig_dev;
1042         int ret = 0;
1043
1044         lockdep_assert_held(&uuid_mutex);
1045
1046         fs_devices = alloc_fs_devices(orig->fsid, NULL);
1047         if (IS_ERR(fs_devices))
1048                 return fs_devices;
1049
1050         fs_devices->total_devices = orig->total_devices;
1051
1052         list_for_each_entry(orig_dev, &orig->devices, dev_list) {
1053                 struct rcu_string *name;
1054
1055                 device = btrfs_alloc_device(NULL, &orig_dev->devid,
1056                                             orig_dev->uuid);
1057                 if (IS_ERR(device)) {
1058                         ret = PTR_ERR(device);
1059                         goto error;
1060                 }
1061
1062                 /*
1063                  * This is ok to do without rcu read locked because we hold the
1064                  * uuid mutex so nothing we touch in here is going to disappear.
1065                  */
1066                 if (orig_dev->name) {
1067                         name = rcu_string_strdup(orig_dev->name->str,
1068                                         GFP_KERNEL);
1069                         if (!name) {
1070                                 btrfs_free_device(device);
1071                                 ret = -ENOMEM;
1072                                 goto error;
1073                         }
1074                         rcu_assign_pointer(device->name, name);
1075                 }
1076
1077                 list_add(&device->dev_list, &fs_devices->devices);
1078                 device->fs_devices = fs_devices;
1079                 fs_devices->num_devices++;
1080         }
1081         return fs_devices;
1082 error:
1083         free_fs_devices(fs_devices);
1084         return ERR_PTR(ret);
1085 }
1086
1087 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
1088                                       int step, struct btrfs_device **latest_dev)
1089 {
1090         struct btrfs_device *device, *next;
1091
1092         /* This is the initialized path, it is safe to release the devices. */
1093         list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
1094                 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
1095                         if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1096                                       &device->dev_state) &&
1097                             !test_bit(BTRFS_DEV_STATE_MISSING,
1098                                       &device->dev_state) &&
1099                             (!*latest_dev ||
1100                              device->generation > (*latest_dev)->generation)) {
1101                                 *latest_dev = device;
1102                         }
1103                         continue;
1104                 }
1105
1106                 /*
1107                  * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
1108                  * in btrfs_init_dev_replace() so just continue.
1109                  */
1110                 if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1111                         continue;
1112
1113                 if (device->bdev) {
1114                         blkdev_put(device->bdev, device->mode);
1115                         device->bdev = NULL;
1116                         fs_devices->open_devices--;
1117                 }
1118                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1119                         list_del_init(&device->dev_alloc_list);
1120                         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1121                         fs_devices->rw_devices--;
1122                 }
1123                 list_del_init(&device->dev_list);
1124                 fs_devices->num_devices--;
1125                 btrfs_free_device(device);
1126         }
1127
1128 }
1129
1130 /*
1131  * After we have read the system tree and know devids belonging to this
1132  * filesystem, remove the device which does not belong there.
1133  */
1134 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
1135 {
1136         struct btrfs_device *latest_dev = NULL;
1137         struct btrfs_fs_devices *seed_dev;
1138
1139         mutex_lock(&uuid_mutex);
1140         __btrfs_free_extra_devids(fs_devices, step, &latest_dev);
1141
1142         list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
1143                 __btrfs_free_extra_devids(seed_dev, step, &latest_dev);
1144
1145         fs_devices->latest_bdev = latest_dev->bdev;
1146
1147         mutex_unlock(&uuid_mutex);
1148 }
1149
1150 static void btrfs_close_bdev(struct btrfs_device *device)
1151 {
1152         if (!device->bdev)
1153                 return;
1154
1155         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1156                 sync_blockdev(device->bdev);
1157                 invalidate_bdev(device->bdev);
1158         }
1159
1160         blkdev_put(device->bdev, device->mode);
1161 }
1162
1163 static void btrfs_close_one_device(struct btrfs_device *device)
1164 {
1165         struct btrfs_fs_devices *fs_devices = device->fs_devices;
1166
1167         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1168             device->devid != BTRFS_DEV_REPLACE_DEVID) {
1169                 list_del_init(&device->dev_alloc_list);
1170                 fs_devices->rw_devices--;
1171         }
1172
1173         if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1174                 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
1175
1176         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
1177                 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
1178                 fs_devices->missing_devices--;
1179         }
1180
1181         btrfs_close_bdev(device);
1182         if (device->bdev) {
1183                 fs_devices->open_devices--;
1184                 device->bdev = NULL;
1185         }
1186         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1187
1188         device->fs_info = NULL;
1189         atomic_set(&device->dev_stats_ccnt, 0);
1190         extent_io_tree_release(&device->alloc_state);
1191
1192         /*
1193          * Reset the flush error record. We might have a transient flush error
1194          * in this mount, and if so we aborted the current transaction and set
1195          * the fs to an error state, guaranteeing no super blocks can be further
1196          * committed. However that error might be transient and if we unmount the
1197          * filesystem and mount it again, we should allow the mount to succeed
1198          * (btrfs_check_rw_degradable() should not fail) - if after mounting the
1199          * filesystem again we still get flush errors, then we will again abort
1200          * any transaction and set the error state, guaranteeing no commits of
1201          * unsafe super blocks.
1202          */
1203         device->last_flush_error = 0;
1204
1205         /* Verify the device is back in a pristine state  */
1206         ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
1207         ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1208         ASSERT(list_empty(&device->dev_alloc_list));
1209         ASSERT(list_empty(&device->post_commit_list));
1210         ASSERT(atomic_read(&device->reada_in_flight) == 0);
1211 }
1212
1213 static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
1214 {
1215         struct btrfs_device *device, *tmp;
1216
1217         lockdep_assert_held(&uuid_mutex);
1218
1219         if (--fs_devices->opened > 0)
1220                 return;
1221
1222         list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
1223                 btrfs_close_one_device(device);
1224
1225         WARN_ON(fs_devices->open_devices);
1226         WARN_ON(fs_devices->rw_devices);
1227         fs_devices->opened = 0;
1228         fs_devices->seeding = false;
1229         fs_devices->fs_info = NULL;
1230 }
1231
1232 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1233 {
1234         LIST_HEAD(list);
1235         struct btrfs_fs_devices *tmp;
1236
1237         mutex_lock(&uuid_mutex);
1238         close_fs_devices(fs_devices);
1239         if (!fs_devices->opened) {
1240                 list_splice_init(&fs_devices->seed_list, &list);
1241
1242                 /*
1243                  * If the struct btrfs_fs_devices is not assembled with any
1244                  * other device, it can be re-initialized during the next mount
1245                  * without the needing device-scan step. Therefore, it can be
1246                  * fully freed.
1247                  */
1248                 if (fs_devices->num_devices == 1) {
1249                         list_del(&fs_devices->fs_list);
1250                         free_fs_devices(fs_devices);
1251                 }
1252         }
1253
1254
1255         list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
1256                 close_fs_devices(fs_devices);
1257                 list_del(&fs_devices->seed_list);
1258                 free_fs_devices(fs_devices);
1259         }
1260         mutex_unlock(&uuid_mutex);
1261 }
1262
1263 static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1264                                 fmode_t flags, void *holder)
1265 {
1266         struct btrfs_device *device;
1267         struct btrfs_device *latest_dev = NULL;
1268         struct btrfs_device *tmp_device;
1269         int ret = 0;
1270
1271         flags |= FMODE_EXCL;
1272
1273         list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
1274                                  dev_list) {
1275                 int ret2;
1276
1277                 ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
1278                 if (ret2 == 0 &&
1279                     (!latest_dev || device->generation > latest_dev->generation)) {
1280                         latest_dev = device;
1281                 } else if (ret2 == -ENODATA) {
1282                         fs_devices->num_devices--;
1283                         list_del(&device->dev_list);
1284                         btrfs_free_device(device);
1285                 }
1286                 if (ret == 0 && ret2 != 0)
1287                         ret = ret2;
1288         }
1289
1290         if (fs_devices->open_devices == 0) {
1291                 if (ret)
1292                         return ret;
1293                 return -EINVAL;
1294         }
1295
1296         fs_devices->opened = 1;
1297         fs_devices->latest_bdev = latest_dev->bdev;
1298         fs_devices->total_rw_bytes = 0;
1299         fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
1300
1301         return 0;
1302 }
1303
1304 static int devid_cmp(void *priv, const struct list_head *a,
1305                      const struct list_head *b)
1306 {
1307         struct btrfs_device *dev1, *dev2;
1308
1309         dev1 = list_entry(a, struct btrfs_device, dev_list);
1310         dev2 = list_entry(b, struct btrfs_device, dev_list);
1311
1312         if (dev1->devid < dev2->devid)
1313                 return -1;
1314         else if (dev1->devid > dev2->devid)
1315                 return 1;
1316         return 0;
1317 }
1318
1319 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1320                        fmode_t flags, void *holder)
1321 {
1322         int ret;
1323
1324         lockdep_assert_held(&uuid_mutex);
1325         /*
1326          * The device_list_mutex cannot be taken here in case opening the
1327          * underlying device takes further locks like bd_mutex.
1328          *
1329          * We also don't need the lock here as this is called during mount and
1330          * exclusion is provided by uuid_mutex
1331          */
1332
1333         if (fs_devices->opened) {
1334                 fs_devices->opened++;
1335                 ret = 0;
1336         } else {
1337                 list_sort(NULL, &fs_devices->devices, devid_cmp);
1338                 ret = open_fs_devices(fs_devices, flags, holder);
1339         }
1340
1341         return ret;
1342 }
1343
1344 void btrfs_release_disk_super(struct btrfs_super_block *super)
1345 {
1346         struct page *page = virt_to_page(super);
1347
1348         put_page(page);
1349 }
1350
1351 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
1352                                                        u64 bytenr)
1353 {
1354         struct btrfs_super_block *disk_super;
1355         struct page *page;
1356         void *p;
1357         pgoff_t index;
1358
1359         /* make sure our super fits in the device */
1360         if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1361                 return ERR_PTR(-EINVAL);
1362
1363         /* make sure our super fits in the page */
1364         if (sizeof(*disk_super) > PAGE_SIZE)
1365                 return ERR_PTR(-EINVAL);
1366
1367         /* make sure our super doesn't straddle pages on disk */
1368         index = bytenr >> PAGE_SHIFT;
1369         if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1370                 return ERR_PTR(-EINVAL);
1371
1372         /* pull in the page with our super */
1373         page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
1374
1375         if (IS_ERR(page))
1376                 return ERR_CAST(page);
1377
1378         p = page_address(page);
1379
1380         /* align our pointer to the offset of the super block */
1381         disk_super = p + offset_in_page(bytenr);
1382
1383         if (btrfs_super_bytenr(disk_super) != bytenr ||
1384             btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1385                 btrfs_release_disk_super(p);
1386                 return ERR_PTR(-EINVAL);
1387         }
1388
1389         if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
1390                 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
1391
1392         return disk_super;
1393 }
1394
1395 int btrfs_forget_devices(const char *path)
1396 {
1397         int ret;
1398
1399         mutex_lock(&uuid_mutex);
1400         ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1401         mutex_unlock(&uuid_mutex);
1402
1403         return ret;
1404 }
1405
1406 /*
1407  * Look for a btrfs signature on a device. This may be called out of the mount path
1408  * and we are not allowed to call set_blocksize during the scan. The superblock
1409  * is read via pagecache
1410  */
1411 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1412                                            void *holder)
1413 {
1414         struct btrfs_super_block *disk_super;
1415         bool new_device_added = false;
1416         struct btrfs_device *device = NULL;
1417         struct block_device *bdev;
1418         u64 bytenr;
1419
1420         lockdep_assert_held(&uuid_mutex);
1421
1422         /*
1423          * we would like to check all the supers, but that would make
1424          * a btrfs mount succeed after a mkfs from a different FS.
1425          * So, we need to add a special mount option to scan for
1426          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1427          */
1428         bytenr = btrfs_sb_offset(0);
1429
1430         /*
1431          * Avoid using flag |= FMODE_EXCL here, as the systemd-udev may
1432          * initiate the device scan which may race with the user's mount
1433          * or mkfs command, resulting in failure.
1434          * Since the device scan is solely for reading purposes, there is
1435          * no need for FMODE_EXCL. Additionally, the devices are read again
1436          * during the mount process. It is ok to get some inconsistent
1437          * values temporarily, as the device paths of the fsid are the only
1438          * required information for assembling the volume.
1439          */
1440         bdev = blkdev_get_by_path(path, flags, holder);
1441         if (IS_ERR(bdev))
1442                 return ERR_CAST(bdev);
1443
1444         disk_super = btrfs_read_disk_super(bdev, bytenr);
1445         if (IS_ERR(disk_super)) {
1446                 device = ERR_CAST(disk_super);
1447                 goto error_bdev_put;
1448         }
1449
1450         device = device_list_add(path, disk_super, &new_device_added);
1451         if (!IS_ERR(device)) {
1452                 if (new_device_added)
1453                         btrfs_free_stale_devices(path, device);
1454         }
1455
1456         btrfs_release_disk_super(disk_super);
1457
1458 error_bdev_put:
1459         blkdev_put(bdev, flags);
1460
1461         return device;
1462 }
1463
1464 /*
1465  * Try to find a chunk that intersects [start, start + len] range and when one
1466  * such is found, record the end of it in *start
1467  */
1468 static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1469                                     u64 len)
1470 {
1471         u64 physical_start, physical_end;
1472
1473         lockdep_assert_held(&device->fs_info->chunk_mutex);
1474
1475         if (!find_first_extent_bit(&device->alloc_state, *start,
1476                                    &physical_start, &physical_end,
1477                                    CHUNK_ALLOCATED, NULL)) {
1478
1479                 if (in_range(physical_start, *start, len) ||
1480                     in_range(*start, physical_start,
1481                              physical_end + 1 - physical_start)) {
1482                         *start = physical_end + 1;
1483                         return true;
1484                 }
1485         }
1486         return false;
1487 }
1488
1489 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
1490 {
1491         switch (device->fs_devices->chunk_alloc_policy) {
1492         case BTRFS_CHUNK_ALLOC_REGULAR:
1493                 /*
1494                  * We don't want to overwrite the superblock on the drive nor
1495                  * any area used by the boot loader (grub for example), so we
1496                  * make sure to start at an offset of at least 1MB.
1497                  */
1498                 return max_t(u64, start, SZ_1M);
1499         default:
1500                 BUG();
1501         }
1502 }
1503
1504 /**
1505  * dev_extent_hole_check - check if specified hole is suitable for allocation
1506  * @device:     the device which we have the hole
1507  * @hole_start: starting position of the hole
1508  * @hole_size:  the size of the hole
1509  * @num_bytes:  the size of the free space that we need
1510  *
1511  * This function may modify @hole_start and @hole_end to reflect the suitable
1512  * position for allocation. Returns 1 if hole position is updated, 0 otherwise.
1513  */
1514 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1515                                   u64 *hole_size, u64 num_bytes)
1516 {
1517         bool changed = false;
1518         u64 hole_end = *hole_start + *hole_size;
1519
1520         /*
1521          * Check before we set max_hole_start, otherwise we could end up
1522          * sending back this offset anyway.
1523          */
1524         if (contains_pending_extent(device, hole_start, *hole_size)) {
1525                 if (hole_end >= *hole_start)
1526                         *hole_size = hole_end - *hole_start;
1527                 else
1528                         *hole_size = 0;
1529                 changed = true;
1530         }
1531
1532         switch (device->fs_devices->chunk_alloc_policy) {
1533         case BTRFS_CHUNK_ALLOC_REGULAR:
1534                 /* No extra check */
1535                 break;
1536         default:
1537                 BUG();
1538         }
1539
1540         return changed;
1541 }
1542
1543 /*
1544  * find_free_dev_extent_start - find free space in the specified device
1545  * @device:       the device which we search the free space in
1546  * @num_bytes:    the size of the free space that we need
1547  * @search_start: the position from which to begin the search
1548  * @start:        store the start of the free space.
1549  * @len:          the size of the free space. that we find, or the size
1550  *                of the max free space if we don't find suitable free space
1551  *
1552  * this uses a pretty simple search, the expectation is that it is
1553  * called very infrequently and that a given device has a small number
1554  * of extents
1555  *
1556  * @start is used to store the start of the free space if we find. But if we
1557  * don't find suitable free space, it will be used to store the start position
1558  * of the max free space.
1559  *
1560  * @len is used to store the size of the free space that we find.
1561  * But if we don't find suitable free space, it is used to store the size of
1562  * the max free space.
1563  *
1564  * NOTE: This function will search *commit* root of device tree, and does extra
1565  * check to ensure dev extents are not double allocated.
1566  * This makes the function safe to allocate dev extents but may not report
1567  * correct usable device space, as device extent freed in current transaction
1568  * is not reported as avaiable.
1569  */
1570 static int find_free_dev_extent_start(struct btrfs_device *device,
1571                                 u64 num_bytes, u64 search_start, u64 *start,
1572                                 u64 *len)
1573 {
1574         struct btrfs_fs_info *fs_info = device->fs_info;
1575         struct btrfs_root *root = fs_info->dev_root;
1576         struct btrfs_key key;
1577         struct btrfs_dev_extent *dev_extent;
1578         struct btrfs_path *path;
1579         u64 hole_size;
1580         u64 max_hole_start;
1581         u64 max_hole_size;
1582         u64 extent_end;
1583         u64 search_end = device->total_bytes;
1584         int ret;
1585         int slot;
1586         struct extent_buffer *l;
1587
1588         search_start = dev_extent_search_start(device, search_start);
1589
1590         path = btrfs_alloc_path();
1591         if (!path)
1592                 return -ENOMEM;
1593
1594         max_hole_start = search_start;
1595         max_hole_size = 0;
1596
1597 again:
1598         if (search_start >= search_end ||
1599                 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1600                 ret = -ENOSPC;
1601                 goto out;
1602         }
1603
1604         path->reada = READA_FORWARD;
1605         path->search_commit_root = 1;
1606         path->skip_locking = 1;
1607
1608         key.objectid = device->devid;
1609         key.offset = search_start;
1610         key.type = BTRFS_DEV_EXTENT_KEY;
1611
1612         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1613         if (ret < 0)
1614                 goto out;
1615         if (ret > 0) {
1616                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1617                 if (ret < 0)
1618                         goto out;
1619         }
1620
1621         while (search_start < search_end) {
1622                 l = path->nodes[0];
1623                 slot = path->slots[0];
1624                 if (slot >= btrfs_header_nritems(l)) {
1625                         ret = btrfs_next_leaf(root, path);
1626                         if (ret == 0)
1627                                 continue;
1628                         if (ret < 0)
1629                                 goto out;
1630
1631                         break;
1632                 }
1633                 btrfs_item_key_to_cpu(l, &key, slot);
1634
1635                 if (key.objectid < device->devid)
1636                         goto next;
1637
1638                 if (key.objectid > device->devid)
1639                         break;
1640
1641                 if (key.type != BTRFS_DEV_EXTENT_KEY)
1642                         goto next;
1643
1644                 if (key.offset > search_end)
1645                         break;
1646
1647                 if (key.offset > search_start) {
1648                         hole_size = key.offset - search_start;
1649                         dev_extent_hole_check(device, &search_start, &hole_size,
1650                                               num_bytes);
1651
1652                         if (hole_size > max_hole_size) {
1653                                 max_hole_start = search_start;
1654                                 max_hole_size = hole_size;
1655                         }
1656
1657                         /*
1658                          * If this free space is greater than which we need,
1659                          * it must be the max free space that we have found
1660                          * until now, so max_hole_start must point to the start
1661                          * of this free space and the length of this free space
1662                          * is stored in max_hole_size. Thus, we return
1663                          * max_hole_start and max_hole_size and go back to the
1664                          * caller.
1665                          */
1666                         if (hole_size >= num_bytes) {
1667                                 ret = 0;
1668                                 goto out;
1669                         }
1670                 }
1671
1672                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1673                 extent_end = key.offset + btrfs_dev_extent_length(l,
1674                                                                   dev_extent);
1675                 if (extent_end > search_start)
1676                         search_start = extent_end;
1677 next:
1678                 path->slots[0]++;
1679                 cond_resched();
1680         }
1681
1682         /*
1683          * At this point, search_start should be the end of
1684          * allocated dev extents, and when shrinking the device,
1685          * search_end may be smaller than search_start.
1686          */
1687         if (search_end > search_start) {
1688                 hole_size = search_end - search_start;
1689                 if (dev_extent_hole_check(device, &search_start, &hole_size,
1690                                           num_bytes)) {
1691                         btrfs_release_path(path);
1692                         goto again;
1693                 }
1694
1695                 if (hole_size > max_hole_size) {
1696                         max_hole_start = search_start;
1697                         max_hole_size = hole_size;
1698                 }
1699         }
1700
1701         /* See above. */
1702         if (max_hole_size < num_bytes)
1703                 ret = -ENOSPC;
1704         else
1705                 ret = 0;
1706
1707         ASSERT(max_hole_start + max_hole_size <= search_end);
1708 out:
1709         btrfs_free_path(path);
1710         *start = max_hole_start;
1711         if (len)
1712                 *len = max_hole_size;
1713         return ret;
1714 }
1715
1716 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1717                          u64 *start, u64 *len)
1718 {
1719         /* FIXME use last free of some kind */
1720         return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1721 }
1722
1723 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1724                           struct btrfs_device *device,
1725                           u64 start, u64 *dev_extent_len)
1726 {
1727         struct btrfs_fs_info *fs_info = device->fs_info;
1728         struct btrfs_root *root = fs_info->dev_root;
1729         int ret;
1730         struct btrfs_path *path;
1731         struct btrfs_key key;
1732         struct btrfs_key found_key;
1733         struct extent_buffer *leaf = NULL;
1734         struct btrfs_dev_extent *extent = NULL;
1735
1736         path = btrfs_alloc_path();
1737         if (!path)
1738                 return -ENOMEM;
1739
1740         key.objectid = device->devid;
1741         key.offset = start;
1742         key.type = BTRFS_DEV_EXTENT_KEY;
1743 again:
1744         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1745         if (ret > 0) {
1746                 ret = btrfs_previous_item(root, path, key.objectid,
1747                                           BTRFS_DEV_EXTENT_KEY);
1748                 if (ret)
1749                         goto out;
1750                 leaf = path->nodes[0];
1751                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1752                 extent = btrfs_item_ptr(leaf, path->slots[0],
1753                                         struct btrfs_dev_extent);
1754                 BUG_ON(found_key.offset > start || found_key.offset +
1755                        btrfs_dev_extent_length(leaf, extent) < start);
1756                 key = found_key;
1757                 btrfs_release_path(path);
1758                 goto again;
1759         } else if (ret == 0) {
1760                 leaf = path->nodes[0];
1761                 extent = btrfs_item_ptr(leaf, path->slots[0],
1762                                         struct btrfs_dev_extent);
1763         } else {
1764                 btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
1765                 goto out;
1766         }
1767
1768         *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1769
1770         ret = btrfs_del_item(trans, root, path);
1771         if (ret) {
1772                 btrfs_handle_fs_error(fs_info, ret,
1773                                       "Failed to remove dev extent item");
1774         } else {
1775                 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1776         }
1777 out:
1778         btrfs_free_path(path);
1779         return ret;
1780 }
1781
1782 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1783                                   struct btrfs_device *device,
1784                                   u64 chunk_offset, u64 start, u64 num_bytes)
1785 {
1786         int ret;
1787         struct btrfs_path *path;
1788         struct btrfs_fs_info *fs_info = device->fs_info;
1789         struct btrfs_root *root = fs_info->dev_root;
1790         struct btrfs_dev_extent *extent;
1791         struct extent_buffer *leaf;
1792         struct btrfs_key key;
1793
1794         WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
1795         WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1796         path = btrfs_alloc_path();
1797         if (!path)
1798                 return -ENOMEM;
1799
1800         key.objectid = device->devid;
1801         key.offset = start;
1802         key.type = BTRFS_DEV_EXTENT_KEY;
1803         ret = btrfs_insert_empty_item(trans, root, path, &key,
1804                                       sizeof(*extent));
1805         if (ret)
1806                 goto out;
1807
1808         leaf = path->nodes[0];
1809         extent = btrfs_item_ptr(leaf, path->slots[0],
1810                                 struct btrfs_dev_extent);
1811         btrfs_set_dev_extent_chunk_tree(leaf, extent,
1812                                         BTRFS_CHUNK_TREE_OBJECTID);
1813         btrfs_set_dev_extent_chunk_objectid(leaf, extent,
1814                                             BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1815         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1816
1817         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1818         btrfs_mark_buffer_dirty(leaf);
1819 out:
1820         btrfs_free_path(path);
1821         return ret;
1822 }
1823
1824 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1825 {
1826         struct extent_map_tree *em_tree;
1827         struct extent_map *em;
1828         struct rb_node *n;
1829         u64 ret = 0;
1830
1831         em_tree = &fs_info->mapping_tree;
1832         read_lock(&em_tree->lock);
1833         n = rb_last(&em_tree->map.rb_root);
1834         if (n) {
1835                 em = rb_entry(n, struct extent_map, rb_node);
1836                 ret = em->start + em->len;
1837         }
1838         read_unlock(&em_tree->lock);
1839
1840         return ret;
1841 }
1842
1843 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1844                                     u64 *devid_ret)
1845 {
1846         int ret;
1847         struct btrfs_key key;
1848         struct btrfs_key found_key;
1849         struct btrfs_path *path;
1850
1851         path = btrfs_alloc_path();
1852         if (!path)
1853                 return -ENOMEM;
1854
1855         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1856         key.type = BTRFS_DEV_ITEM_KEY;
1857         key.offset = (u64)-1;
1858
1859         ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1860         if (ret < 0)
1861                 goto error;
1862
1863         if (ret == 0) {
1864                 /* Corruption */
1865                 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1866                 ret = -EUCLEAN;
1867                 goto error;
1868         }
1869
1870         ret = btrfs_previous_item(fs_info->chunk_root, path,
1871                                   BTRFS_DEV_ITEMS_OBJECTID,
1872                                   BTRFS_DEV_ITEM_KEY);
1873         if (ret) {
1874                 *devid_ret = 1;
1875         } else {
1876                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1877                                       path->slots[0]);
1878                 *devid_ret = found_key.offset + 1;
1879         }
1880         ret = 0;
1881 error:
1882         btrfs_free_path(path);
1883         return ret;
1884 }
1885
1886 /*
1887  * the device information is stored in the chunk root
1888  * the btrfs_device struct should be fully filled in
1889  */
1890 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1891                             struct btrfs_device *device)
1892 {
1893         int ret;
1894         struct btrfs_path *path;
1895         struct btrfs_dev_item *dev_item;
1896         struct extent_buffer *leaf;
1897         struct btrfs_key key;
1898         unsigned long ptr;
1899
1900         path = btrfs_alloc_path();
1901         if (!path)
1902                 return -ENOMEM;
1903
1904         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1905         key.type = BTRFS_DEV_ITEM_KEY;
1906         key.offset = device->devid;
1907
1908         ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1909                                       &key, sizeof(*dev_item));
1910         if (ret)
1911                 goto out;
1912
1913         leaf = path->nodes[0];
1914         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1915
1916         btrfs_set_device_id(leaf, dev_item, device->devid);
1917         btrfs_set_device_generation(leaf, dev_item, 0);
1918         btrfs_set_device_type(leaf, dev_item, device->type);
1919         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1920         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1921         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1922         btrfs_set_device_total_bytes(leaf, dev_item,
1923                                      btrfs_device_get_disk_total_bytes(device));
1924         btrfs_set_device_bytes_used(leaf, dev_item,
1925                                     btrfs_device_get_bytes_used(device));
1926         btrfs_set_device_group(leaf, dev_item, 0);
1927         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1928         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1929         btrfs_set_device_start_offset(leaf, dev_item, 0);
1930
1931         ptr = btrfs_device_uuid(dev_item);
1932         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1933         ptr = btrfs_device_fsid(dev_item);
1934         write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1935                             ptr, BTRFS_FSID_SIZE);
1936         btrfs_mark_buffer_dirty(leaf);
1937
1938         ret = 0;
1939 out:
1940         btrfs_free_path(path);
1941         return ret;
1942 }
1943
1944 /*
1945  * Function to update ctime/mtime for a given device path.
1946  * Mainly used for ctime/mtime based probe like libblkid.
1947  *
1948  * We don't care about errors here, this is just to be kind to userspace.
1949  */
1950 static void update_dev_time(const char *device_path)
1951 {
1952         struct path path;
1953         struct timespec64 now;
1954         int ret;
1955
1956         ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1957         if (ret)
1958                 return;
1959
1960         now = current_time(d_inode(path.dentry));
1961         inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1962         path_put(&path);
1963 }
1964
1965 static int btrfs_rm_dev_item(struct btrfs_device *device)
1966 {
1967         struct btrfs_root *root = device->fs_info->chunk_root;
1968         int ret;
1969         struct btrfs_path *path;
1970         struct btrfs_key key;
1971         struct btrfs_trans_handle *trans;
1972
1973         path = btrfs_alloc_path();
1974         if (!path)
1975                 return -ENOMEM;
1976
1977         trans = btrfs_start_transaction(root, 0);
1978         if (IS_ERR(trans)) {
1979                 btrfs_free_path(path);
1980                 return PTR_ERR(trans);
1981         }
1982         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1983         key.type = BTRFS_DEV_ITEM_KEY;
1984         key.offset = device->devid;
1985
1986         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1987         if (ret) {
1988                 if (ret > 0)
1989                         ret = -ENOENT;
1990                 btrfs_abort_transaction(trans, ret);
1991                 btrfs_end_transaction(trans);
1992                 goto out;
1993         }
1994
1995         ret = btrfs_del_item(trans, root, path);
1996         if (ret) {
1997                 btrfs_abort_transaction(trans, ret);
1998                 btrfs_end_transaction(trans);
1999         }
2000
2001 out:
2002         btrfs_free_path(path);
2003         if (!ret)
2004                 ret = btrfs_commit_transaction(trans);
2005         return ret;
2006 }
2007
2008 /*
2009  * Verify that @num_devices satisfies the RAID profile constraints in the whole
2010  * filesystem. It's up to the caller to adjust that number regarding eg. device
2011  * replace.
2012  */
2013 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
2014                 u64 num_devices)
2015 {
2016         u64 all_avail;
2017         unsigned seq;
2018         int i;
2019
2020         do {
2021                 seq = read_seqbegin(&fs_info->profiles_lock);
2022
2023                 all_avail = fs_info->avail_data_alloc_bits |
2024                             fs_info->avail_system_alloc_bits |
2025                             fs_info->avail_metadata_alloc_bits;
2026         } while (read_seqretry(&fs_info->profiles_lock, seq));
2027
2028         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2029                 if (!(all_avail & btrfs_raid_array[i].bg_flag))
2030                         continue;
2031
2032                 if (num_devices < btrfs_raid_array[i].devs_min) {
2033                         int ret = btrfs_raid_array[i].mindev_error;
2034
2035                         if (ret)
2036                                 return ret;
2037                 }
2038         }
2039
2040         return 0;
2041 }
2042
2043 static struct btrfs_device * btrfs_find_next_active_device(
2044                 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2045 {
2046         struct btrfs_device *next_device;
2047
2048         list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2049                 if (next_device != device &&
2050                     !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2051                     && next_device->bdev)
2052                         return next_device;
2053         }
2054
2055         return NULL;
2056 }
2057
2058 /*
2059  * Helper function to check if the given device is part of s_bdev / latest_bdev
2060  * and replace it with the provided or the next active device, in the context
2061  * where this function called, there should be always be another device (or
2062  * this_dev) which is active.
2063  */
2064 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2065                                             struct btrfs_device *next_device)
2066 {
2067         struct btrfs_fs_info *fs_info = device->fs_info;
2068
2069         if (!next_device)
2070                 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2071                                                             device);
2072         ASSERT(next_device);
2073
2074         if (fs_info->sb->s_bdev &&
2075                         (fs_info->sb->s_bdev == device->bdev))
2076                 fs_info->sb->s_bdev = next_device->bdev;
2077
2078         if (fs_info->fs_devices->latest_bdev == device->bdev)
2079                 fs_info->fs_devices->latest_bdev = next_device->bdev;
2080 }
2081
2082 /*
2083  * Return btrfs_fs_devices::num_devices excluding the device that's being
2084  * currently replaced.
2085  */
2086 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2087 {
2088         u64 num_devices = fs_info->fs_devices->num_devices;
2089
2090         down_read(&fs_info->dev_replace.rwsem);
2091         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2092                 ASSERT(num_devices > 1);
2093                 num_devices--;
2094         }
2095         up_read(&fs_info->dev_replace.rwsem);
2096
2097         return num_devices;
2098 }
2099
2100 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2101                                struct block_device *bdev,
2102                                const char *device_path)
2103 {
2104         struct btrfs_super_block *disk_super;
2105         int copy_num;
2106
2107         if (!bdev)
2108                 return;
2109
2110         for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2111                 struct page *page;
2112                 int ret;
2113
2114                 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2115                 if (IS_ERR(disk_super))
2116                         continue;
2117
2118                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2119
2120                 page = virt_to_page(disk_super);
2121                 set_page_dirty(page);
2122                 lock_page(page);
2123                 /* write_on_page() unlocks the page */
2124                 ret = write_one_page(page);
2125                 if (ret)
2126                         btrfs_warn(fs_info,
2127                                 "error clearing superblock number %d (%d)",
2128                                 copy_num, ret);
2129                 btrfs_release_disk_super(disk_super);
2130
2131         }
2132
2133         /* Notify udev that device has changed */
2134         btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2135
2136         /* Update ctime/mtime for device path for libblkid */
2137         update_dev_time(device_path);
2138 }
2139
2140 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2141                     u64 devid)
2142 {
2143         struct btrfs_device *device;
2144         struct btrfs_fs_devices *cur_devices;
2145         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2146         u64 num_devices;
2147         int ret = 0;
2148
2149         /*
2150          * The device list in fs_devices is accessed without locks (neither
2151          * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2152          * filesystem and another device rm cannot run.
2153          */
2154         num_devices = btrfs_num_devices(fs_info);
2155
2156         ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2157         if (ret)
2158                 goto out;
2159
2160         device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2161
2162         if (IS_ERR(device)) {
2163                 if (PTR_ERR(device) == -ENOENT &&
2164                     device_path && strcmp(device_path, "missing") == 0)
2165                         ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2166                 else
2167                         ret = PTR_ERR(device);
2168                 goto out;
2169         }
2170
2171         if (btrfs_pinned_by_swapfile(fs_info, device)) {
2172                 btrfs_warn_in_rcu(fs_info,
2173                   "cannot remove device %s (devid %llu) due to active swapfile",
2174                                   rcu_str_deref(device->name), device->devid);
2175                 ret = -ETXTBSY;
2176                 goto out;
2177         }
2178
2179         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2180                 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2181                 goto out;
2182         }
2183
2184         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2185             fs_info->fs_devices->rw_devices == 1) {
2186                 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2187                 goto out;
2188         }
2189
2190         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2191                 mutex_lock(&fs_info->chunk_mutex);
2192                 list_del_init(&device->dev_alloc_list);
2193                 device->fs_devices->rw_devices--;
2194                 mutex_unlock(&fs_info->chunk_mutex);
2195         }
2196
2197         ret = btrfs_shrink_device(device, 0);
2198         if (!ret)
2199                 btrfs_reada_remove_dev(device);
2200         if (ret)
2201                 goto error_undo;
2202
2203         /*
2204          * TODO: the superblock still includes this device in its num_devices
2205          * counter although write_all_supers() is not locked out. This
2206          * could give a filesystem state which requires a degraded mount.
2207          */
2208         ret = btrfs_rm_dev_item(device);
2209         if (ret)
2210                 goto error_undo;
2211
2212         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2213         btrfs_scrub_cancel_dev(device);
2214
2215         /*
2216          * the device list mutex makes sure that we don't change
2217          * the device list while someone else is writing out all
2218          * the device supers. Whoever is writing all supers, should
2219          * lock the device list mutex before getting the number of
2220          * devices in the super block (super_copy). Conversely,
2221          * whoever updates the number of devices in the super block
2222          * (super_copy) should hold the device list mutex.
2223          */
2224
2225         /*
2226          * In normal cases the cur_devices == fs_devices. But in case
2227          * of deleting a seed device, the cur_devices should point to
2228          * its own fs_devices listed under the fs_devices->seed.
2229          */
2230         cur_devices = device->fs_devices;
2231         mutex_lock(&fs_devices->device_list_mutex);
2232         list_del_rcu(&device->dev_list);
2233
2234         cur_devices->num_devices--;
2235         cur_devices->total_devices--;
2236         /* Update total_devices of the parent fs_devices if it's seed */
2237         if (cur_devices != fs_devices)
2238                 fs_devices->total_devices--;
2239
2240         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2241                 cur_devices->missing_devices--;
2242
2243         btrfs_assign_next_active_device(device, NULL);
2244
2245         if (device->bdev) {
2246                 cur_devices->open_devices--;
2247                 /* remove sysfs entry */
2248                 btrfs_sysfs_remove_device(device);
2249         }
2250
2251         num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2252         btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2253         mutex_unlock(&fs_devices->device_list_mutex);
2254
2255         /*
2256          * at this point, the device is zero sized and detached from
2257          * the devices list.  All that's left is to zero out the old
2258          * supers and free the device.
2259          */
2260         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2261                 btrfs_scratch_superblocks(fs_info, device->bdev,
2262                                           device->name->str);
2263
2264         btrfs_close_bdev(device);
2265         synchronize_rcu();
2266         btrfs_free_device(device);
2267
2268         if (cur_devices->open_devices == 0) {
2269                 list_del_init(&cur_devices->seed_list);
2270                 close_fs_devices(cur_devices);
2271                 free_fs_devices(cur_devices);
2272         }
2273
2274 out:
2275         return ret;
2276
2277 error_undo:
2278         btrfs_reada_undo_remove_dev(device);
2279         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2280                 mutex_lock(&fs_info->chunk_mutex);
2281                 list_add(&device->dev_alloc_list,
2282                          &fs_devices->alloc_list);
2283                 device->fs_devices->rw_devices++;
2284                 mutex_unlock(&fs_info->chunk_mutex);
2285         }
2286         goto out;
2287 }
2288
2289 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2290 {
2291         struct btrfs_fs_devices *fs_devices;
2292
2293         lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2294
2295         /*
2296          * in case of fs with no seed, srcdev->fs_devices will point
2297          * to fs_devices of fs_info. However when the dev being replaced is
2298          * a seed dev it will point to the seed's local fs_devices. In short
2299          * srcdev will have its correct fs_devices in both the cases.
2300          */
2301         fs_devices = srcdev->fs_devices;
2302
2303         list_del_rcu(&srcdev->dev_list);
2304         list_del(&srcdev->dev_alloc_list);
2305         fs_devices->num_devices--;
2306         if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2307                 fs_devices->missing_devices--;
2308
2309         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2310                 fs_devices->rw_devices--;
2311
2312         if (srcdev->bdev)
2313                 fs_devices->open_devices--;
2314 }
2315
2316 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2317 {
2318         struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2319
2320         mutex_lock(&uuid_mutex);
2321
2322         btrfs_close_bdev(srcdev);
2323         synchronize_rcu();
2324         btrfs_free_device(srcdev);
2325
2326         /* if this is no devs we rather delete the fs_devices */
2327         if (!fs_devices->num_devices) {
2328                 /*
2329                  * On a mounted FS, num_devices can't be zero unless it's a
2330                  * seed. In case of a seed device being replaced, the replace
2331                  * target added to the sprout FS, so there will be no more
2332                  * device left under the seed FS.
2333                  */
2334                 ASSERT(fs_devices->seeding);
2335
2336                 list_del_init(&fs_devices->seed_list);
2337                 close_fs_devices(fs_devices);
2338                 free_fs_devices(fs_devices);
2339         }
2340         mutex_unlock(&uuid_mutex);
2341 }
2342
2343 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2344 {
2345         struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2346
2347         mutex_lock(&fs_devices->device_list_mutex);
2348
2349         btrfs_sysfs_remove_device(tgtdev);
2350
2351         if (tgtdev->bdev)
2352                 fs_devices->open_devices--;
2353
2354         fs_devices->num_devices--;
2355
2356         btrfs_assign_next_active_device(tgtdev, NULL);
2357
2358         list_del_rcu(&tgtdev->dev_list);
2359
2360         mutex_unlock(&fs_devices->device_list_mutex);
2361
2362         /*
2363          * The update_dev_time() with in btrfs_scratch_superblocks()
2364          * may lead to a call to btrfs_show_devname() which will try
2365          * to hold device_list_mutex. And here this device
2366          * is already out of device list, so we don't have to hold
2367          * the device_list_mutex lock.
2368          */
2369         btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2370                                   tgtdev->name->str);
2371
2372         btrfs_close_bdev(tgtdev);
2373         synchronize_rcu();
2374         btrfs_free_device(tgtdev);
2375 }
2376
2377 static struct btrfs_device *btrfs_find_device_by_path(
2378                 struct btrfs_fs_info *fs_info, const char *device_path)
2379 {
2380         int ret = 0;
2381         struct btrfs_super_block *disk_super;
2382         u64 devid;
2383         u8 *dev_uuid;
2384         struct block_device *bdev;
2385         struct btrfs_device *device;
2386
2387         ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2388                                     fs_info->bdev_holder, 0, &bdev, &disk_super);
2389         if (ret)
2390                 return ERR_PTR(ret);
2391
2392         devid = btrfs_stack_device_id(&disk_super->dev_item);
2393         dev_uuid = disk_super->dev_item.uuid;
2394         if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2395                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2396                                            disk_super->metadata_uuid, true);
2397         else
2398                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2399                                            disk_super->fsid, true);
2400
2401         btrfs_release_disk_super(disk_super);
2402         if (!device)
2403                 device = ERR_PTR(-ENOENT);
2404         blkdev_put(bdev, FMODE_READ);
2405         return device;
2406 }
2407
2408 /*
2409  * Lookup a device given by device id, or the path if the id is 0.
2410  */
2411 struct btrfs_device *btrfs_find_device_by_devspec(
2412                 struct btrfs_fs_info *fs_info, u64 devid,
2413                 const char *device_path)
2414 {
2415         struct btrfs_device *device;
2416
2417         if (devid) {
2418                 device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2419                                            NULL, true);
2420                 if (!device)
2421                         return ERR_PTR(-ENOENT);
2422                 return device;
2423         }
2424
2425         if (!device_path || !device_path[0])
2426                 return ERR_PTR(-EINVAL);
2427
2428         if (strcmp(device_path, "missing") == 0) {
2429                 /* Find first missing device */
2430                 list_for_each_entry(device, &fs_info->fs_devices->devices,
2431                                     dev_list) {
2432                         if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2433                                      &device->dev_state) && !device->bdev)
2434                                 return device;
2435                 }
2436                 return ERR_PTR(-ENOENT);
2437         }
2438
2439         return btrfs_find_device_by_path(fs_info, device_path);
2440 }
2441
2442 /*
2443  * does all the dirty work required for changing file system's UUID.
2444  */
2445 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2446 {
2447         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2448         struct btrfs_fs_devices *old_devices;
2449         struct btrfs_fs_devices *seed_devices;
2450         struct btrfs_super_block *disk_super = fs_info->super_copy;
2451         struct btrfs_device *device;
2452         u64 super_flags;
2453
2454         lockdep_assert_held(&uuid_mutex);
2455         if (!fs_devices->seeding)
2456                 return -EINVAL;
2457
2458         /*
2459          * Private copy of the seed devices, anchored at
2460          * fs_info->fs_devices->seed_list
2461          */
2462         seed_devices = alloc_fs_devices(NULL, NULL);
2463         if (IS_ERR(seed_devices))
2464                 return PTR_ERR(seed_devices);
2465
2466         /*
2467          * It's necessary to retain a copy of the original seed fs_devices in
2468          * fs_uuids so that filesystems which have been seeded can successfully
2469          * reference the seed device from open_seed_devices. This also supports
2470          * multiple fs seed.
2471          */
2472         old_devices = clone_fs_devices(fs_devices);
2473         if (IS_ERR(old_devices)) {
2474                 kfree(seed_devices);
2475                 return PTR_ERR(old_devices);
2476         }
2477
2478         list_add(&old_devices->fs_list, &fs_uuids);
2479
2480         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2481         seed_devices->opened = 1;
2482         INIT_LIST_HEAD(&seed_devices->devices);
2483         INIT_LIST_HEAD(&seed_devices->alloc_list);
2484         mutex_init(&seed_devices->device_list_mutex);
2485
2486         mutex_lock(&fs_devices->device_list_mutex);
2487         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2488                               synchronize_rcu);
2489         list_for_each_entry(device, &seed_devices->devices, dev_list)
2490                 device->fs_devices = seed_devices;
2491
2492         fs_devices->seeding = false;
2493         fs_devices->num_devices = 0;
2494         fs_devices->open_devices = 0;
2495         fs_devices->missing_devices = 0;
2496         fs_devices->rotating = false;
2497         list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2498
2499         generate_random_uuid(fs_devices->fsid);
2500         memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2501         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2502         mutex_unlock(&fs_devices->device_list_mutex);
2503
2504         super_flags = btrfs_super_flags(disk_super) &
2505                       ~BTRFS_SUPER_FLAG_SEEDING;
2506         btrfs_set_super_flags(disk_super, super_flags);
2507
2508         return 0;
2509 }
2510
2511 /*
2512  * Store the expected generation for seed devices in device items.
2513  */
2514 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2515 {
2516         struct btrfs_fs_info *fs_info = trans->fs_info;
2517         struct btrfs_root *root = fs_info->chunk_root;
2518         struct btrfs_path *path;
2519         struct extent_buffer *leaf;
2520         struct btrfs_dev_item *dev_item;
2521         struct btrfs_device *device;
2522         struct btrfs_key key;
2523         u8 fs_uuid[BTRFS_FSID_SIZE];
2524         u8 dev_uuid[BTRFS_UUID_SIZE];
2525         u64 devid;
2526         int ret;
2527
2528         path = btrfs_alloc_path();
2529         if (!path)
2530                 return -ENOMEM;
2531
2532         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2533         key.offset = 0;
2534         key.type = BTRFS_DEV_ITEM_KEY;
2535
2536         while (1) {
2537                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2538                 if (ret < 0)
2539                         goto error;
2540
2541                 leaf = path->nodes[0];
2542 next_slot:
2543                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2544                         ret = btrfs_next_leaf(root, path);
2545                         if (ret > 0)
2546                                 break;
2547                         if (ret < 0)
2548                                 goto error;
2549                         leaf = path->nodes[0];
2550                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2551                         btrfs_release_path(path);
2552                         continue;
2553                 }
2554
2555                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2556                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2557                     key.type != BTRFS_DEV_ITEM_KEY)
2558                         break;
2559
2560                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2561                                           struct btrfs_dev_item);
2562                 devid = btrfs_device_id(leaf, dev_item);
2563                 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2564                                    BTRFS_UUID_SIZE);
2565                 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2566                                    BTRFS_FSID_SIZE);
2567                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2568                                            fs_uuid, true);
2569                 BUG_ON(!device); /* Logic error */
2570
2571                 if (device->fs_devices->seeding) {
2572                         btrfs_set_device_generation(leaf, dev_item,
2573                                                     device->generation);
2574                         btrfs_mark_buffer_dirty(leaf);
2575                 }
2576
2577                 path->slots[0]++;
2578                 goto next_slot;
2579         }
2580         ret = 0;
2581 error:
2582         btrfs_free_path(path);
2583         return ret;
2584 }
2585
2586 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2587 {
2588         struct btrfs_root *root = fs_info->dev_root;
2589         struct request_queue *q;
2590         struct btrfs_trans_handle *trans;
2591         struct btrfs_device *device;
2592         struct block_device *bdev;
2593         struct super_block *sb = fs_info->sb;
2594         struct rcu_string *name;
2595         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2596         u64 orig_super_total_bytes;
2597         u64 orig_super_num_devices;
2598         int seeding_dev = 0;
2599         int ret = 0;
2600         bool locked = false;
2601
2602         if (sb_rdonly(sb) && !fs_devices->seeding)
2603                 return -EROFS;
2604
2605         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2606                                   fs_info->bdev_holder);
2607         if (IS_ERR(bdev))
2608                 return PTR_ERR(bdev);
2609
2610         if (fs_devices->seeding) {
2611                 seeding_dev = 1;
2612                 down_write(&sb->s_umount);
2613                 mutex_lock(&uuid_mutex);
2614                 locked = true;
2615         }
2616
2617         sync_blockdev(bdev);
2618
2619         rcu_read_lock();
2620         list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2621                 if (device->bdev == bdev) {
2622                         ret = -EEXIST;
2623                         rcu_read_unlock();
2624                         goto error;
2625                 }
2626         }
2627         rcu_read_unlock();
2628
2629         device = btrfs_alloc_device(fs_info, NULL, NULL);
2630         if (IS_ERR(device)) {
2631                 /* we can safely leave the fs_devices entry around */
2632                 ret = PTR_ERR(device);
2633                 goto error;
2634         }
2635
2636         name = rcu_string_strdup(device_path, GFP_KERNEL);
2637         if (!name) {
2638                 ret = -ENOMEM;
2639                 goto error_free_device;
2640         }
2641         rcu_assign_pointer(device->name, name);
2642
2643         trans = btrfs_start_transaction(root, 0);
2644         if (IS_ERR(trans)) {
2645                 ret = PTR_ERR(trans);
2646                 goto error_free_device;
2647         }
2648
2649         q = bdev_get_queue(bdev);
2650         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2651         device->generation = trans->transid;
2652         device->io_width = fs_info->sectorsize;
2653         device->io_align = fs_info->sectorsize;
2654         device->sector_size = fs_info->sectorsize;
2655         device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2656                                          fs_info->sectorsize);
2657         device->disk_total_bytes = device->total_bytes;
2658         device->commit_total_bytes = device->total_bytes;
2659         device->fs_info = fs_info;
2660         device->bdev = bdev;
2661         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2662         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2663         device->mode = FMODE_EXCL;
2664         device->dev_stats_valid = 1;
2665         set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2666
2667         if (seeding_dev) {
2668                 sb->s_flags &= ~SB_RDONLY;
2669                 ret = btrfs_prepare_sprout(fs_info);
2670                 if (ret) {
2671                         btrfs_abort_transaction(trans, ret);
2672                         goto error_trans;
2673                 }
2674         }
2675
2676         device->fs_devices = fs_devices;
2677
2678         mutex_lock(&fs_devices->device_list_mutex);
2679         mutex_lock(&fs_info->chunk_mutex);
2680         list_add_rcu(&device->dev_list, &fs_devices->devices);
2681         list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2682         fs_devices->num_devices++;
2683         fs_devices->open_devices++;
2684         fs_devices->rw_devices++;
2685         fs_devices->total_devices++;
2686         fs_devices->total_rw_bytes += device->total_bytes;
2687
2688         atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2689
2690         if (!blk_queue_nonrot(q))
2691                 fs_devices->rotating = true;
2692
2693         orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2694         btrfs_set_super_total_bytes(fs_info->super_copy,
2695                 round_down(orig_super_total_bytes + device->total_bytes,
2696                            fs_info->sectorsize));
2697
2698         orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2699         btrfs_set_super_num_devices(fs_info->super_copy,
2700                                     orig_super_num_devices + 1);
2701
2702         /*
2703          * we've got more storage, clear any full flags on the space
2704          * infos
2705          */
2706         btrfs_clear_space_info_full(fs_info);
2707
2708         mutex_unlock(&fs_info->chunk_mutex);
2709
2710         /* Add sysfs device entry */
2711         btrfs_sysfs_add_device(device);
2712
2713         mutex_unlock(&fs_devices->device_list_mutex);
2714
2715         if (seeding_dev) {
2716                 mutex_lock(&fs_info->chunk_mutex);
2717                 ret = init_first_rw_device(trans);
2718                 mutex_unlock(&fs_info->chunk_mutex);
2719                 if (ret) {
2720                         btrfs_abort_transaction(trans, ret);
2721                         goto error_sysfs;
2722                 }
2723         }
2724
2725         ret = btrfs_add_dev_item(trans, device);
2726         if (ret) {
2727                 btrfs_abort_transaction(trans, ret);
2728                 goto error_sysfs;
2729         }
2730
2731         if (seeding_dev) {
2732                 ret = btrfs_finish_sprout(trans);
2733                 if (ret) {
2734                         btrfs_abort_transaction(trans, ret);
2735                         goto error_sysfs;
2736                 }
2737
2738                 /*
2739                  * fs_devices now represents the newly sprouted filesystem and
2740                  * its fsid has been changed by btrfs_prepare_sprout
2741                  */
2742                 btrfs_sysfs_update_sprout_fsid(fs_devices);
2743         }
2744
2745         ret = btrfs_commit_transaction(trans);
2746
2747         if (seeding_dev) {
2748                 mutex_unlock(&uuid_mutex);
2749                 up_write(&sb->s_umount);
2750                 locked = false;
2751
2752                 if (ret) /* transaction commit */
2753                         return ret;
2754
2755                 ret = btrfs_relocate_sys_chunks(fs_info);
2756                 if (ret < 0)
2757                         btrfs_handle_fs_error(fs_info, ret,
2758                                     "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2759                 trans = btrfs_attach_transaction(root);
2760                 if (IS_ERR(trans)) {
2761                         if (PTR_ERR(trans) == -ENOENT)
2762                                 return 0;
2763                         ret = PTR_ERR(trans);
2764                         trans = NULL;
2765                         goto error_sysfs;
2766                 }
2767                 ret = btrfs_commit_transaction(trans);
2768         }
2769
2770         /*
2771          * Now that we have written a new super block to this device, check all
2772          * other fs_devices list if device_path alienates any other scanned
2773          * device.
2774          * We can ignore the return value as it typically returns -EINVAL and
2775          * only succeeds if the device was an alien.
2776          */
2777         btrfs_forget_devices(device_path);
2778
2779         /* Update ctime/mtime for blkid or udev */
2780         update_dev_time(device_path);
2781
2782         return ret;
2783
2784 error_sysfs:
2785         btrfs_sysfs_remove_device(device);
2786         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2787         mutex_lock(&fs_info->chunk_mutex);
2788         list_del_rcu(&device->dev_list);
2789         list_del(&device->dev_alloc_list);
2790         fs_info->fs_devices->num_devices--;
2791         fs_info->fs_devices->open_devices--;
2792         fs_info->fs_devices->rw_devices--;
2793         fs_info->fs_devices->total_devices--;
2794         fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2795         atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2796         btrfs_set_super_total_bytes(fs_info->super_copy,
2797                                     orig_super_total_bytes);
2798         btrfs_set_super_num_devices(fs_info->super_copy,
2799                                     orig_super_num_devices);
2800         mutex_unlock(&fs_info->chunk_mutex);
2801         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2802 error_trans:
2803         if (seeding_dev)
2804                 sb->s_flags |= SB_RDONLY;
2805         if (trans)
2806                 btrfs_end_transaction(trans);
2807 error_free_device:
2808         btrfs_free_device(device);
2809 error:
2810         blkdev_put(bdev, FMODE_EXCL);
2811         if (locked) {
2812                 mutex_unlock(&uuid_mutex);
2813                 up_write(&sb->s_umount);
2814         }
2815         return ret;
2816 }
2817
2818 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2819                                         struct btrfs_device *device)
2820 {
2821         int ret;
2822         struct btrfs_path *path;
2823         struct btrfs_root *root = device->fs_info->chunk_root;
2824         struct btrfs_dev_item *dev_item;
2825         struct extent_buffer *leaf;
2826         struct btrfs_key key;
2827
2828         path = btrfs_alloc_path();
2829         if (!path)
2830                 return -ENOMEM;
2831
2832         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2833         key.type = BTRFS_DEV_ITEM_KEY;
2834         key.offset = device->devid;
2835
2836         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2837         if (ret < 0)
2838                 goto out;
2839
2840         if (ret > 0) {
2841                 ret = -ENOENT;
2842                 goto out;
2843         }
2844
2845         leaf = path->nodes[0];
2846         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2847
2848         btrfs_set_device_id(leaf, dev_item, device->devid);
2849         btrfs_set_device_type(leaf, dev_item, device->type);
2850         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2851         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2852         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2853         btrfs_set_device_total_bytes(leaf, dev_item,
2854                                      btrfs_device_get_disk_total_bytes(device));
2855         btrfs_set_device_bytes_used(leaf, dev_item,
2856                                     btrfs_device_get_bytes_used(device));
2857         btrfs_mark_buffer_dirty(leaf);
2858
2859 out:
2860         btrfs_free_path(path);
2861         return ret;
2862 }
2863
2864 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2865                       struct btrfs_device *device, u64 new_size)
2866 {
2867         struct btrfs_fs_info *fs_info = device->fs_info;
2868         struct btrfs_super_block *super_copy = fs_info->super_copy;
2869         u64 old_total;
2870         u64 diff;
2871
2872         if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2873                 return -EACCES;
2874
2875         new_size = round_down(new_size, fs_info->sectorsize);
2876
2877         mutex_lock(&fs_info->chunk_mutex);
2878         old_total = btrfs_super_total_bytes(super_copy);
2879         diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2880
2881         if (new_size <= device->total_bytes ||
2882             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2883                 mutex_unlock(&fs_info->chunk_mutex);
2884                 return -EINVAL;
2885         }
2886
2887         btrfs_set_super_total_bytes(super_copy,
2888                         round_down(old_total + diff, fs_info->sectorsize));
2889         device->fs_devices->total_rw_bytes += diff;
2890
2891         btrfs_device_set_total_bytes(device, new_size);
2892         btrfs_device_set_disk_total_bytes(device, new_size);
2893         btrfs_clear_space_info_full(device->fs_info);
2894         if (list_empty(&device->post_commit_list))
2895                 list_add_tail(&device->post_commit_list,
2896                               &trans->transaction->dev_update_list);
2897         mutex_unlock(&fs_info->chunk_mutex);
2898
2899         return btrfs_update_device(trans, device);
2900 }
2901
2902 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2903 {
2904         struct btrfs_fs_info *fs_info = trans->fs_info;
2905         struct btrfs_root *root = fs_info->chunk_root;
2906         int ret;
2907         struct btrfs_path *path;
2908         struct btrfs_key key;
2909
2910         path = btrfs_alloc_path();
2911         if (!path)
2912                 return -ENOMEM;
2913
2914         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2915         key.offset = chunk_offset;
2916         key.type = BTRFS_CHUNK_ITEM_KEY;
2917
2918         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2919         if (ret < 0)
2920                 goto out;
2921         else if (ret > 0) { /* Logic error or corruption */
2922                 btrfs_handle_fs_error(fs_info, -ENOENT,
2923                                       "Failed lookup while freeing chunk.");
2924                 ret = -ENOENT;
2925                 goto out;
2926         }
2927
2928         ret = btrfs_del_item(trans, root, path);
2929         if (ret < 0)
2930                 btrfs_handle_fs_error(fs_info, ret,
2931                                       "Failed to delete chunk item.");
2932 out:
2933         btrfs_free_path(path);
2934         return ret;
2935 }
2936
2937 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2938 {
2939         struct btrfs_super_block *super_copy = fs_info->super_copy;
2940         struct btrfs_disk_key *disk_key;
2941         struct btrfs_chunk *chunk;
2942         u8 *ptr;
2943         int ret = 0;
2944         u32 num_stripes;
2945         u32 array_size;
2946         u32 len = 0;
2947         u32 cur;
2948         struct btrfs_key key;
2949
2950         mutex_lock(&fs_info->chunk_mutex);
2951         array_size = btrfs_super_sys_array_size(super_copy);
2952
2953         ptr = super_copy->sys_chunk_array;
2954         cur = 0;
2955
2956         while (cur < array_size) {
2957                 disk_key = (struct btrfs_disk_key *)ptr;
2958                 btrfs_disk_key_to_cpu(&key, disk_key);
2959
2960                 len = sizeof(*disk_key);
2961
2962                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2963                         chunk = (struct btrfs_chunk *)(ptr + len);
2964                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2965                         len += btrfs_chunk_item_size(num_stripes);
2966                 } else {
2967                         ret = -EIO;
2968                         break;
2969                 }
2970                 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2971                     key.offset == chunk_offset) {
2972                         memmove(ptr, ptr + len, array_size - (cur + len));
2973                         array_size -= len;
2974                         btrfs_set_super_sys_array_size(super_copy, array_size);
2975                 } else {
2976                         ptr += len;
2977                         cur += len;
2978                 }
2979         }
2980         mutex_unlock(&fs_info->chunk_mutex);
2981         return ret;
2982 }
2983
2984 /*
2985  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2986  * @logical: Logical block offset in bytes.
2987  * @length: Length of extent in bytes.
2988  *
2989  * Return: Chunk mapping or ERR_PTR.
2990  */
2991 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2992                                        u64 logical, u64 length)
2993 {
2994         struct extent_map_tree *em_tree;
2995         struct extent_map *em;
2996
2997         em_tree = &fs_info->mapping_tree;
2998         read_lock(&em_tree->lock);
2999         em = lookup_extent_mapping(em_tree, logical, length);
3000         read_unlock(&em_tree->lock);
3001
3002         if (!em) {
3003                 btrfs_crit(fs_info,
3004                            "unable to find chunk map for logical %llu length %llu",
3005                            logical, length);
3006                 return ERR_PTR(-EINVAL);
3007         }
3008
3009         if (em->start > logical || em->start + em->len <= logical) {
3010                 btrfs_crit(fs_info,
3011                            "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
3012                            logical, logical + length, em->start, em->start + em->len);
3013                 free_extent_map(em);
3014                 return ERR_PTR(-EINVAL);
3015         }
3016
3017         /* callers are responsible for dropping em's ref. */
3018         return em;
3019 }
3020
3021 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3022 {
3023         struct btrfs_fs_info *fs_info = trans->fs_info;
3024         struct extent_map *em;
3025         struct map_lookup *map;
3026         u64 dev_extent_len = 0;
3027         int i, ret = 0;
3028         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3029
3030         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3031         if (IS_ERR(em)) {
3032                 /*
3033                  * This is a logic error, but we don't want to just rely on the
3034                  * user having built with ASSERT enabled, so if ASSERT doesn't
3035                  * do anything we still error out.
3036                  */
3037                 ASSERT(0);
3038                 return PTR_ERR(em);
3039         }
3040         map = em->map_lookup;
3041         mutex_lock(&fs_info->chunk_mutex);
3042         check_system_chunk(trans, map->type);
3043         mutex_unlock(&fs_info->chunk_mutex);
3044
3045         /*
3046          * Take the device list mutex to prevent races with the final phase of
3047          * a device replace operation that replaces the device object associated
3048          * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
3049          */
3050         mutex_lock(&fs_devices->device_list_mutex);
3051         for (i = 0; i < map->num_stripes; i++) {
3052                 struct btrfs_device *device = map->stripes[i].dev;
3053                 ret = btrfs_free_dev_extent(trans, device,
3054                                             map->stripes[i].physical,
3055                                             &dev_extent_len);
3056                 if (ret) {
3057                         mutex_unlock(&fs_devices->device_list_mutex);
3058                         btrfs_abort_transaction(trans, ret);
3059                         goto out;
3060                 }
3061
3062                 if (device->bytes_used > 0) {
3063                         mutex_lock(&fs_info->chunk_mutex);
3064                         btrfs_device_set_bytes_used(device,
3065                                         device->bytes_used - dev_extent_len);
3066                         atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3067                         btrfs_clear_space_info_full(fs_info);
3068                         mutex_unlock(&fs_info->chunk_mutex);
3069                 }
3070
3071                 ret = btrfs_update_device(trans, device);
3072                 if (ret) {
3073                         mutex_unlock(&fs_devices->device_list_mutex);
3074                         btrfs_abort_transaction(trans, ret);
3075                         goto out;
3076                 }
3077         }
3078         mutex_unlock(&fs_devices->device_list_mutex);
3079
3080         ret = btrfs_free_chunk(trans, chunk_offset);
3081         if (ret) {
3082                 btrfs_abort_transaction(trans, ret);
3083                 goto out;
3084         }
3085
3086         trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3087
3088         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3089                 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3090                 if (ret) {
3091                         btrfs_abort_transaction(trans, ret);
3092                         goto out;
3093                 }
3094         }
3095
3096         ret = btrfs_remove_block_group(trans, chunk_offset, em);
3097         if (ret) {
3098                 btrfs_abort_transaction(trans, ret);
3099                 goto out;
3100         }
3101
3102 out:
3103         /* once for us */
3104         free_extent_map(em);
3105         return ret;
3106 }
3107
3108 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3109 {
3110         struct btrfs_root *root = fs_info->chunk_root;
3111         struct btrfs_trans_handle *trans;
3112         struct btrfs_block_group *block_group;
3113         int ret;
3114
3115         /*
3116          * Prevent races with automatic removal of unused block groups.
3117          * After we relocate and before we remove the chunk with offset
3118          * chunk_offset, automatic removal of the block group can kick in,
3119          * resulting in a failure when calling btrfs_remove_chunk() below.
3120          *
3121          * Make sure to acquire this mutex before doing a tree search (dev
3122          * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3123          * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3124          * we release the path used to search the chunk/dev tree and before
3125          * the current task acquires this mutex and calls us.
3126          */
3127         lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3128
3129         /* step one, relocate all the extents inside this chunk */
3130         btrfs_scrub_pause(fs_info);
3131         ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3132         btrfs_scrub_continue(fs_info);
3133         if (ret)
3134                 return ret;
3135
3136         block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3137         if (!block_group)
3138                 return -ENOENT;
3139         btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3140         btrfs_put_block_group(block_group);
3141
3142         trans = btrfs_start_trans_remove_block_group(root->fs_info,
3143                                                      chunk_offset);
3144         if (IS_ERR(trans)) {
3145                 ret = PTR_ERR(trans);
3146                 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3147                 return ret;
3148         }
3149
3150         /*
3151          * step two, delete the device extents and the
3152          * chunk tree entries
3153          */
3154         ret = btrfs_remove_chunk(trans, chunk_offset);
3155         btrfs_end_transaction(trans);
3156         return ret;
3157 }
3158
3159 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3160 {
3161         struct btrfs_root *chunk_root = fs_info->chunk_root;
3162         struct btrfs_path *path;
3163         struct extent_buffer *leaf;
3164         struct btrfs_chunk *chunk;
3165         struct btrfs_key key;
3166         struct btrfs_key found_key;
3167         u64 chunk_type;
3168         bool retried = false;
3169         int failed = 0;
3170         int ret;
3171
3172         path = btrfs_alloc_path();
3173         if (!path)
3174                 return -ENOMEM;
3175
3176 again:
3177         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3178         key.offset = (u64)-1;
3179         key.type = BTRFS_CHUNK_ITEM_KEY;
3180
3181         while (1) {
3182                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3183                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3184                 if (ret < 0) {
3185                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3186                         goto error;
3187                 }
3188                 if (ret == 0) {
3189                         /*
3190                          * On the first search we would find chunk tree with
3191                          * offset -1, which is not possible. On subsequent
3192                          * loops this would find an existing item on an invalid
3193                          * offset (one less than the previous one, wrong
3194                          * alignment and size).
3195                          */
3196                         ret = -EUCLEAN;
3197                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3198                         goto error;
3199                 }
3200
3201                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3202                                           key.type);
3203                 if (ret)
3204                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3205                 if (ret < 0)
3206                         goto error;
3207                 if (ret > 0)
3208                         break;
3209
3210                 leaf = path->nodes[0];
3211                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3212
3213                 chunk = btrfs_item_ptr(leaf, path->slots[0],
3214                                        struct btrfs_chunk);
3215                 chunk_type = btrfs_chunk_type(leaf, chunk);
3216                 btrfs_release_path(path);
3217
3218                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3219                         ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3220                         if (ret == -ENOSPC)
3221                                 failed++;
3222                         else
3223                                 BUG_ON(ret);
3224                 }
3225                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3226
3227                 if (found_key.offset == 0)
3228                         break;
3229                 key.offset = found_key.offset - 1;
3230         }
3231         ret = 0;
3232         if (failed && !retried) {
3233                 failed = 0;
3234                 retried = true;
3235                 goto again;
3236         } else if (WARN_ON(failed && retried)) {
3237                 ret = -ENOSPC;
3238         }
3239 error:
3240         btrfs_free_path(path);
3241         return ret;
3242 }
3243
3244 /*
3245  * return 1 : allocate a data chunk successfully,
3246  * return <0: errors during allocating a data chunk,
3247  * return 0 : no need to allocate a data chunk.
3248  */
3249 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3250                                       u64 chunk_offset)
3251 {
3252         struct btrfs_block_group *cache;
3253         u64 bytes_used;
3254         u64 chunk_type;
3255
3256         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3257         ASSERT(cache);
3258         chunk_type = cache->flags;
3259         btrfs_put_block_group(cache);
3260
3261         if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3262                 return 0;
3263
3264         spin_lock(&fs_info->data_sinfo->lock);
3265         bytes_used = fs_info->data_sinfo->bytes_used;
3266         spin_unlock(&fs_info->data_sinfo->lock);
3267
3268         if (!bytes_used) {
3269                 struct btrfs_trans_handle *trans;
3270                 int ret;
3271
3272                 trans = btrfs_join_transaction(fs_info->tree_root);
3273                 if (IS_ERR(trans))
3274                         return PTR_ERR(trans);
3275
3276                 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3277                 btrfs_end_transaction(trans);
3278                 if (ret < 0)
3279                         return ret;
3280                 return 1;
3281         }
3282
3283         return 0;
3284 }
3285
3286 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3287                                struct btrfs_balance_control *bctl)
3288 {
3289         struct btrfs_root *root = fs_info->tree_root;
3290         struct btrfs_trans_handle *trans;
3291         struct btrfs_balance_item *item;
3292         struct btrfs_disk_balance_args disk_bargs;
3293         struct btrfs_path *path;
3294         struct extent_buffer *leaf;
3295         struct btrfs_key key;
3296         int ret, err;
3297
3298         path = btrfs_alloc_path();
3299         if (!path)
3300                 return -ENOMEM;
3301
3302         trans = btrfs_start_transaction(root, 0);
3303         if (IS_ERR(trans)) {
3304                 btrfs_free_path(path);
3305                 return PTR_ERR(trans);
3306         }
3307
3308         key.objectid = BTRFS_BALANCE_OBJECTID;
3309         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3310         key.offset = 0;
3311
3312         ret = btrfs_insert_empty_item(trans, root, path, &key,
3313                                       sizeof(*item));
3314         if (ret)
3315                 goto out;
3316
3317         leaf = path->nodes[0];
3318         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3319
3320         memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3321
3322         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3323         btrfs_set_balance_data(leaf, item, &disk_bargs);
3324         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3325         btrfs_set_balance_meta(leaf, item, &disk_bargs);
3326         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3327         btrfs_set_balance_sys(leaf, item, &disk_bargs);
3328
3329         btrfs_set_balance_flags(leaf, item, bctl->flags);
3330
3331         btrfs_mark_buffer_dirty(leaf);
3332 out:
3333         btrfs_free_path(path);
3334         err = btrfs_commit_transaction(trans);
3335         if (err && !ret)
3336                 ret = err;
3337         return ret;
3338 }
3339
3340 static int del_balance_item(struct btrfs_fs_info *fs_info)
3341 {
3342         struct btrfs_root *root = fs_info->tree_root;
3343         struct btrfs_trans_handle *trans;
3344         struct btrfs_path *path;
3345         struct btrfs_key key;
3346         int ret, err;
3347
3348         path = btrfs_alloc_path();
3349         if (!path)
3350                 return -ENOMEM;
3351
3352         trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3353         if (IS_ERR(trans)) {
3354                 btrfs_free_path(path);
3355                 return PTR_ERR(trans);
3356         }
3357
3358         key.objectid = BTRFS_BALANCE_OBJECTID;
3359         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3360         key.offset = 0;
3361
3362         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3363         if (ret < 0)
3364                 goto out;
3365         if (ret > 0) {
3366                 ret = -ENOENT;
3367                 goto out;
3368         }
3369
3370         ret = btrfs_del_item(trans, root, path);
3371 out:
3372         btrfs_free_path(path);
3373         err = btrfs_commit_transaction(trans);
3374         if (err && !ret)
3375                 ret = err;
3376         return ret;
3377 }
3378
3379 /*
3380  * This is a heuristic used to reduce the number of chunks balanced on
3381  * resume after balance was interrupted.
3382  */
3383 static void update_balance_args(struct btrfs_balance_control *bctl)
3384 {
3385         /*
3386          * Turn on soft mode for chunk types that were being converted.
3387          */
3388         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3389                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3390         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3391                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3392         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3393                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3394
3395         /*
3396          * Turn on usage filter if is not already used.  The idea is
3397          * that chunks that we have already balanced should be
3398          * reasonably full.  Don't do it for chunks that are being
3399          * converted - that will keep us from relocating unconverted
3400          * (albeit full) chunks.
3401          */
3402         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3403             !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3404             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3405                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3406                 bctl->data.usage = 90;
3407         }
3408         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3409             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3410             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3411                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3412                 bctl->sys.usage = 90;
3413         }
3414         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3415             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3416             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3417                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3418                 bctl->meta.usage = 90;
3419         }
3420 }
3421
3422 /*
3423  * Clear the balance status in fs_info and delete the balance item from disk.
3424  */
3425 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3426 {
3427         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3428         int ret;
3429
3430         BUG_ON(!fs_info->balance_ctl);
3431
3432         spin_lock(&fs_info->balance_lock);
3433         fs_info->balance_ctl = NULL;
3434         spin_unlock(&fs_info->balance_lock);
3435
3436         kfree(bctl);
3437         ret = del_balance_item(fs_info);
3438         if (ret)
3439                 btrfs_handle_fs_error(fs_info, ret, NULL);
3440 }
3441
3442 /*
3443  * Balance filters.  Return 1 if chunk should be filtered out
3444  * (should not be balanced).
3445  */
3446 static int chunk_profiles_filter(u64 chunk_type,
3447                                  struct btrfs_balance_args *bargs)
3448 {
3449         chunk_type = chunk_to_extended(chunk_type) &
3450                                 BTRFS_EXTENDED_PROFILE_MASK;
3451
3452         if (bargs->profiles & chunk_type)
3453                 return 0;
3454
3455         return 1;
3456 }
3457
3458 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3459                               struct btrfs_balance_args *bargs)
3460 {
3461         struct btrfs_block_group *cache;
3462         u64 chunk_used;
3463         u64 user_thresh_min;
3464         u64 user_thresh_max;
3465         int ret = 1;
3466
3467         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3468         chunk_used = cache->used;
3469
3470         if (bargs->usage_min == 0)
3471                 user_thresh_min = 0;
3472         else
3473                 user_thresh_min = div_factor_fine(cache->length,
3474                                                   bargs->usage_min);
3475
3476         if (bargs->usage_max == 0)
3477                 user_thresh_max = 1;
3478         else if (bargs->usage_max > 100)
3479                 user_thresh_max = cache->length;
3480         else
3481                 user_thresh_max = div_factor_fine(cache->length,
3482                                                   bargs->usage_max);
3483
3484         if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3485                 ret = 0;
3486
3487         btrfs_put_block_group(cache);
3488         return ret;
3489 }
3490
3491 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3492                 u64 chunk_offset, struct btrfs_balance_args *bargs)
3493 {
3494         struct btrfs_block_group *cache;
3495         u64 chunk_used, user_thresh;
3496         int ret = 1;
3497
3498         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3499         chunk_used = cache->used;
3500
3501         if (bargs->usage_min == 0)
3502                 user_thresh = 1;
3503         else if (bargs->usage > 100)
3504                 user_thresh = cache->length;
3505         else
3506                 user_thresh = div_factor_fine(cache->length, bargs->usage);
3507
3508         if (chunk_used < user_thresh)
3509                 ret = 0;
3510
3511         btrfs_put_block_group(cache);
3512         return ret;
3513 }
3514
3515 static int chunk_devid_filter(struct extent_buffer *leaf,
3516                               struct btrfs_chunk *chunk,
3517                               struct btrfs_balance_args *bargs)
3518 {
3519         struct btrfs_stripe *stripe;
3520         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3521         int i;
3522
3523         for (i = 0; i < num_stripes; i++) {
3524                 stripe = btrfs_stripe_nr(chunk, i);
3525                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3526                         return 0;
3527         }
3528
3529         return 1;
3530 }
3531
3532 static u64 calc_data_stripes(u64 type, int num_stripes)
3533 {
3534         const int index = btrfs_bg_flags_to_raid_index(type);
3535         const int ncopies = btrfs_raid_array[index].ncopies;
3536         const int nparity = btrfs_raid_array[index].nparity;
3537
3538         if (nparity)
3539                 return num_stripes - nparity;
3540         else
3541                 return num_stripes / ncopies;
3542 }
3543
3544 /* [pstart, pend) */
3545 static int chunk_drange_filter(struct extent_buffer *leaf,
3546                                struct btrfs_chunk *chunk,
3547                                struct btrfs_balance_args *bargs)
3548 {
3549         struct btrfs_stripe *stripe;
3550         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3551         u64 stripe_offset;
3552         u64 stripe_length;
3553         u64 type;
3554         int factor;
3555         int i;
3556
3557         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3558                 return 0;
3559
3560         type = btrfs_chunk_type(leaf, chunk);
3561         factor = calc_data_stripes(type, num_stripes);
3562
3563         for (i = 0; i < num_stripes; i++) {
3564                 stripe = btrfs_stripe_nr(chunk, i);
3565                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3566                         continue;
3567
3568                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3569                 stripe_length = btrfs_chunk_length(leaf, chunk);
3570                 stripe_length = div_u64(stripe_length, factor);
3571
3572                 if (stripe_offset < bargs->pend &&
3573                     stripe_offset + stripe_length > bargs->pstart)
3574                         return 0;
3575         }
3576
3577         return 1;
3578 }
3579
3580 /* [vstart, vend) */
3581 static int chunk_vrange_filter(struct extent_buffer *leaf,
3582                                struct btrfs_chunk *chunk,
3583                                u64 chunk_offset,
3584                                struct btrfs_balance_args *bargs)
3585 {
3586         if (chunk_offset < bargs->vend &&
3587             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3588                 /* at least part of the chunk is inside this vrange */
3589                 return 0;
3590
3591         return 1;
3592 }
3593
3594 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3595                                struct btrfs_chunk *chunk,
3596                                struct btrfs_balance_args *bargs)
3597 {
3598         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3599
3600         if (bargs->stripes_min <= num_stripes
3601                         && num_stripes <= bargs->stripes_max)
3602                 return 0;
3603
3604         return 1;
3605 }
3606
3607 static int chunk_soft_convert_filter(u64 chunk_type,
3608                                      struct btrfs_balance_args *bargs)
3609 {
3610         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3611                 return 0;
3612
3613         chunk_type = chunk_to_extended(chunk_type) &
3614                                 BTRFS_EXTENDED_PROFILE_MASK;
3615
3616         if (bargs->target == chunk_type)
3617                 return 1;
3618
3619         return 0;
3620 }
3621
3622 static int should_balance_chunk(struct extent_buffer *leaf,
3623                                 struct btrfs_chunk *chunk, u64 chunk_offset)
3624 {
3625         struct btrfs_fs_info *fs_info = leaf->fs_info;
3626         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3627         struct btrfs_balance_args *bargs = NULL;
3628         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3629
3630         /* type filter */
3631         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3632               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3633                 return 0;
3634         }
3635
3636         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3637                 bargs = &bctl->data;
3638         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3639                 bargs = &bctl->sys;
3640         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3641                 bargs = &bctl->meta;
3642
3643         /* profiles filter */
3644         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3645             chunk_profiles_filter(chunk_type, bargs)) {
3646                 return 0;
3647         }
3648
3649         /* usage filter */
3650         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3651             chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3652                 return 0;
3653         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3654             chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3655                 return 0;
3656         }
3657
3658         /* devid filter */
3659         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3660             chunk_devid_filter(leaf, chunk, bargs)) {
3661                 return 0;
3662         }
3663
3664         /* drange filter, makes sense only with devid filter */
3665         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3666             chunk_drange_filter(leaf, chunk, bargs)) {
3667                 return 0;
3668         }
3669
3670         /* vrange filter */
3671         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3672             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3673                 return 0;
3674         }
3675
3676         /* stripes filter */
3677         if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3678             chunk_stripes_range_filter(leaf, chunk, bargs)) {
3679                 return 0;
3680         }
3681
3682         /* soft profile changing mode */
3683         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3684             chunk_soft_convert_filter(chunk_type, bargs)) {
3685                 return 0;
3686         }
3687
3688         /*
3689          * limited by count, must be the last filter
3690          */
3691         if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3692                 if (bargs->limit == 0)
3693                         return 0;
3694                 else
3695                         bargs->limit--;
3696         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3697                 /*
3698                  * Same logic as the 'limit' filter; the minimum cannot be
3699                  * determined here because we do not have the global information
3700                  * about the count of all chunks that satisfy the filters.
3701                  */
3702                 if (bargs->limit_max == 0)
3703                         return 0;
3704                 else
3705                         bargs->limit_max--;
3706         }
3707
3708         return 1;
3709 }
3710
3711 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3712 {
3713         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3714         struct btrfs_root *chunk_root = fs_info->chunk_root;
3715         u64 chunk_type;
3716         struct btrfs_chunk *chunk;
3717         struct btrfs_path *path = NULL;
3718         struct btrfs_key key;
3719         struct btrfs_key found_key;
3720         struct extent_buffer *leaf;
3721         int slot;
3722         int ret;
3723         int enospc_errors = 0;
3724         bool counting = true;
3725         /* The single value limit and min/max limits use the same bytes in the */
3726         u64 limit_data = bctl->data.limit;
3727         u64 limit_meta = bctl->meta.limit;
3728         u64 limit_sys = bctl->sys.limit;
3729         u32 count_data = 0;
3730         u32 count_meta = 0;
3731         u32 count_sys = 0;
3732         int chunk_reserved = 0;
3733
3734         path = btrfs_alloc_path();
3735         if (!path) {
3736                 ret = -ENOMEM;
3737                 goto error;
3738         }
3739
3740         /* zero out stat counters */
3741         spin_lock(&fs_info->balance_lock);
3742         memset(&bctl->stat, 0, sizeof(bctl->stat));
3743         spin_unlock(&fs_info->balance_lock);
3744 again:
3745         if (!counting) {
3746                 /*
3747                  * The single value limit and min/max limits use the same bytes
3748                  * in the
3749                  */
3750                 bctl->data.limit = limit_data;
3751                 bctl->meta.limit = limit_meta;
3752                 bctl->sys.limit = limit_sys;
3753         }
3754         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3755         key.offset = (u64)-1;
3756         key.type = BTRFS_CHUNK_ITEM_KEY;
3757
3758         while (1) {
3759                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3760                     atomic_read(&fs_info->balance_cancel_req)) {
3761                         ret = -ECANCELED;
3762                         goto error;
3763                 }
3764
3765                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3766                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3767                 if (ret < 0) {
3768                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3769                         goto error;
3770                 }
3771
3772                 /*
3773                  * this shouldn't happen, it means the last relocate
3774                  * failed
3775                  */
3776                 if (ret == 0)
3777                         BUG(); /* FIXME break ? */
3778
3779                 ret = btrfs_previous_item(chunk_root, path, 0,
3780                                           BTRFS_CHUNK_ITEM_KEY);
3781                 if (ret) {
3782                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3783                         ret = 0;
3784                         break;
3785                 }
3786
3787                 leaf = path->nodes[0];
3788                 slot = path->slots[0];
3789                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3790
3791                 if (found_key.objectid != key.objectid) {
3792                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3793                         break;
3794                 }
3795
3796                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3797                 chunk_type = btrfs_chunk_type(leaf, chunk);
3798
3799                 if (!counting) {
3800                         spin_lock(&fs_info->balance_lock);
3801                         bctl->stat.considered++;
3802                         spin_unlock(&fs_info->balance_lock);
3803                 }
3804
3805                 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3806
3807                 btrfs_release_path(path);
3808                 if (!ret) {
3809                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3810                         goto loop;
3811                 }
3812
3813                 if (counting) {
3814                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3815                         spin_lock(&fs_info->balance_lock);
3816                         bctl->stat.expected++;
3817                         spin_unlock(&fs_info->balance_lock);
3818
3819                         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3820                                 count_data++;
3821                         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3822                                 count_sys++;
3823                         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3824                                 count_meta++;
3825
3826                         goto loop;
3827                 }
3828
3829                 /*
3830                  * Apply limit_min filter, no need to check if the LIMITS
3831                  * filter is used, limit_min is 0 by default
3832                  */
3833                 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3834                                         count_data < bctl->data.limit_min)
3835                                 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3836                                         count_meta < bctl->meta.limit_min)
3837                                 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3838                                         count_sys < bctl->sys.limit_min)) {
3839                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3840                         goto loop;
3841                 }
3842
3843                 if (!chunk_reserved) {
3844                         /*
3845                          * We may be relocating the only data chunk we have,
3846                          * which could potentially end up with losing data's
3847                          * raid profile, so lets allocate an empty one in
3848                          * advance.
3849                          */
3850                         ret = btrfs_may_alloc_data_chunk(fs_info,
3851                                                          found_key.offset);
3852                         if (ret < 0) {
3853                                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3854                                 goto error;
3855                         } else if (ret == 1) {
3856                                 chunk_reserved = 1;
3857                         }
3858                 }
3859
3860                 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3861                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3862                 if (ret == -ENOSPC) {
3863                         enospc_errors++;
3864                 } else if (ret == -ETXTBSY) {
3865                         btrfs_info(fs_info,
3866            "skipping relocation of block group %llu due to active swapfile",
3867                                    found_key.offset);
3868                         ret = 0;
3869                 } else if (ret) {
3870                         goto error;
3871                 } else {
3872                         spin_lock(&fs_info->balance_lock);
3873                         bctl->stat.completed++;
3874                         spin_unlock(&fs_info->balance_lock);
3875                 }
3876 loop:
3877                 if (found_key.offset == 0)
3878                         break;
3879                 key.offset = found_key.offset - 1;
3880         }
3881
3882         if (counting) {
3883                 btrfs_release_path(path);
3884                 counting = false;
3885                 goto again;
3886         }
3887 error:
3888         btrfs_free_path(path);
3889         if (enospc_errors) {
3890                 btrfs_info(fs_info, "%d enospc errors during balance",
3891                            enospc_errors);
3892                 if (!ret)
3893                         ret = -ENOSPC;
3894         }
3895
3896         return ret;
3897 }
3898
3899 /**
3900  * alloc_profile_is_valid - see if a given profile is valid and reduced
3901  * @flags: profile to validate
3902  * @extended: if true @flags is treated as an extended profile
3903  */
3904 static int alloc_profile_is_valid(u64 flags, int extended)
3905 {
3906         u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3907                                BTRFS_BLOCK_GROUP_PROFILE_MASK);
3908
3909         flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3910
3911         /* 1) check that all other bits are zeroed */
3912         if (flags & ~mask)
3913                 return 0;
3914
3915         /* 2) see if profile is reduced */
3916         if (flags == 0)
3917                 return !extended; /* "0" is valid for usual profiles */
3918
3919         return has_single_bit_set(flags);
3920 }
3921
3922 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3923 {
3924         /* cancel requested || normal exit path */
3925         return atomic_read(&fs_info->balance_cancel_req) ||
3926                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3927                  atomic_read(&fs_info->balance_cancel_req) == 0);
3928 }
3929
3930 /*
3931  * Validate target profile against allowed profiles and return true if it's OK.
3932  * Otherwise print the error message and return false.
3933  */
3934 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
3935                 const struct btrfs_balance_args *bargs,
3936                 u64 allowed, const char *type)
3937 {
3938         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3939                 return true;
3940
3941         /* Profile is valid and does not have bits outside of the allowed set */
3942         if (alloc_profile_is_valid(bargs->target, 1) &&
3943             (bargs->target & ~allowed) == 0)
3944                 return true;
3945
3946         btrfs_err(fs_info, "balance: invalid convert %s profile %s",
3947                         type, btrfs_bg_type_to_raid_name(bargs->target));
3948         return false;
3949 }
3950
3951 /*
3952  * Fill @buf with textual description of balance filter flags @bargs, up to
3953  * @size_buf including the terminating null. The output may be trimmed if it
3954  * does not fit into the provided buffer.
3955  */
3956 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3957                                  u32 size_buf)
3958 {
3959         int ret;
3960         u32 size_bp = size_buf;
3961         char *bp = buf;
3962         u64 flags = bargs->flags;
3963         char tmp_buf[128] = {'\0'};
3964
3965         if (!flags)
3966                 return;
3967
3968 #define CHECK_APPEND_NOARG(a)                                           \
3969         do {                                                            \
3970                 ret = snprintf(bp, size_bp, (a));                       \
3971                 if (ret < 0 || ret >= size_bp)                          \
3972                         goto out_overflow;                              \
3973                 size_bp -= ret;                                         \
3974                 bp += ret;                                              \
3975         } while (0)
3976
3977 #define CHECK_APPEND_1ARG(a, v1)                                        \
3978         do {                                                            \
3979                 ret = snprintf(bp, size_bp, (a), (v1));                 \
3980                 if (ret < 0 || ret >= size_bp)                          \
3981                         goto out_overflow;                              \
3982                 size_bp -= ret;                                         \
3983                 bp += ret;                                              \
3984         } while (0)
3985
3986 #define CHECK_APPEND_2ARG(a, v1, v2)                                    \
3987         do {                                                            \
3988                 ret = snprintf(bp, size_bp, (a), (v1), (v2));           \
3989                 if (ret < 0 || ret >= size_bp)                          \
3990                         goto out_overflow;                              \
3991                 size_bp -= ret;                                         \
3992                 bp += ret;                                              \
3993         } while (0)
3994
3995         if (flags & BTRFS_BALANCE_ARGS_CONVERT)
3996                 CHECK_APPEND_1ARG("convert=%s,",
3997                                   btrfs_bg_type_to_raid_name(bargs->target));
3998
3999         if (flags & BTRFS_BALANCE_ARGS_SOFT)
4000                 CHECK_APPEND_NOARG("soft,");
4001
4002         if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4003                 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4004                                             sizeof(tmp_buf));
4005                 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4006         }
4007
4008         if (flags & BTRFS_BALANCE_ARGS_USAGE)
4009                 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4010
4011         if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4012                 CHECK_APPEND_2ARG("usage=%u..%u,",
4013                                   bargs->usage_min, bargs->usage_max);
4014
4015         if (flags & BTRFS_BALANCE_ARGS_DEVID)
4016                 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4017
4018         if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4019                 CHECK_APPEND_2ARG("drange=%llu..%llu,",
4020                                   bargs->pstart, bargs->pend);
4021
4022         if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4023                 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4024                                   bargs->vstart, bargs->vend);
4025
4026         if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4027                 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4028
4029         if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4030                 CHECK_APPEND_2ARG("limit=%u..%u,",
4031                                 bargs->limit_min, bargs->limit_max);
4032
4033         if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4034                 CHECK_APPEND_2ARG("stripes=%u..%u,",
4035                                   bargs->stripes_min, bargs->stripes_max);
4036
4037 #undef CHECK_APPEND_2ARG
4038 #undef CHECK_APPEND_1ARG
4039 #undef CHECK_APPEND_NOARG
4040
4041 out_overflow:
4042
4043         if (size_bp < size_buf)
4044                 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4045         else
4046                 buf[0] = '\0';
4047 }
4048
4049 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4050 {
4051         u32 size_buf = 1024;
4052         char tmp_buf[192] = {'\0'};
4053         char *buf;
4054         char *bp;
4055         u32 size_bp = size_buf;
4056         int ret;
4057         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4058
4059         buf = kzalloc(size_buf, GFP_KERNEL);
4060         if (!buf)
4061                 return;
4062
4063         bp = buf;
4064
4065 #define CHECK_APPEND_1ARG(a, v1)                                        \
4066         do {                                                            \
4067                 ret = snprintf(bp, size_bp, (a), (v1));                 \
4068                 if (ret < 0 || ret >= size_bp)                          \
4069                         goto out_overflow;                              \
4070                 size_bp -= ret;                                         \
4071                 bp += ret;                                              \
4072         } while (0)
4073
4074         if (bctl->flags & BTRFS_BALANCE_FORCE)
4075                 CHECK_APPEND_1ARG("%s", "-f ");
4076
4077         if (bctl->flags & BTRFS_BALANCE_DATA) {
4078                 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4079                 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4080         }
4081
4082         if (bctl->flags & BTRFS_BALANCE_METADATA) {
4083                 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4084                 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4085         }
4086
4087         if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4088                 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4089                 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4090         }
4091
4092 #undef CHECK_APPEND_1ARG
4093
4094 out_overflow:
4095
4096         if (size_bp < size_buf)
4097                 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4098         btrfs_info(fs_info, "balance: %s %s",
4099                    (bctl->flags & BTRFS_BALANCE_RESUME) ?
4100                    "resume" : "start", buf);
4101
4102         kfree(buf);
4103 }
4104
4105 /*
4106  * Should be called with balance mutexe held
4107  */
4108 int btrfs_balance(struct btrfs_fs_info *fs_info,
4109                   struct btrfs_balance_control *bctl,
4110                   struct btrfs_ioctl_balance_args *bargs)
4111 {
4112         u64 meta_target, data_target;
4113         u64 allowed;
4114         int mixed = 0;
4115         int ret;
4116         u64 num_devices;
4117         unsigned seq;
4118         bool reducing_redundancy;
4119         int i;
4120
4121         if (btrfs_fs_closing(fs_info) ||
4122             atomic_read(&fs_info->balance_pause_req) ||
4123             btrfs_should_cancel_balance(fs_info)) {
4124                 ret = -EINVAL;
4125                 goto out;
4126         }
4127
4128         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4129         if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4130                 mixed = 1;
4131
4132         /*
4133          * In case of mixed groups both data and meta should be picked,
4134          * and identical options should be given for both of them.
4135          */
4136         allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4137         if (mixed && (bctl->flags & allowed)) {
4138                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4139                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4140                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4141                         btrfs_err(fs_info,
4142           "balance: mixed groups data and metadata options must be the same");
4143                         ret = -EINVAL;
4144                         goto out;
4145                 }
4146         }
4147
4148         /*
4149          * rw_devices will not change at the moment, device add/delete/replace
4150          * are exclusive
4151          */
4152         num_devices = fs_info->fs_devices->rw_devices;
4153
4154         /*
4155          * SINGLE profile on-disk has no profile bit, but in-memory we have a
4156          * special bit for it, to make it easier to distinguish.  Thus we need
4157          * to set it manually, or balance would refuse the profile.
4158          */
4159         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4160         for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4161                 if (num_devices >= btrfs_raid_array[i].devs_min)
4162                         allowed |= btrfs_raid_array[i].bg_flag;
4163
4164         if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4165             !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4166             !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4167                 ret = -EINVAL;
4168                 goto out;
4169         }
4170
4171         /*
4172          * Allow to reduce metadata or system integrity only if force set for
4173          * profiles with redundancy (copies, parity)
4174          */
4175         allowed = 0;
4176         for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4177                 if (btrfs_raid_array[i].ncopies >= 2 ||
4178                     btrfs_raid_array[i].tolerated_failures >= 1)
4179                         allowed |= btrfs_raid_array[i].bg_flag;
4180         }
4181         do {
4182                 seq = read_seqbegin(&fs_info->profiles_lock);
4183
4184                 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4185                      (fs_info->avail_system_alloc_bits & allowed) &&
4186                      !(bctl->sys.target & allowed)) ||
4187                     ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4188                      (fs_info->avail_metadata_alloc_bits & allowed) &&
4189                      !(bctl->meta.target & allowed)))
4190                         reducing_redundancy = true;
4191                 else
4192                         reducing_redundancy = false;
4193
4194                 /* if we're not converting, the target field is uninitialized */
4195                 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4196                         bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4197                 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4198                         bctl->data.target : fs_info->avail_data_alloc_bits;
4199         } while (read_seqretry(&fs_info->profiles_lock, seq));
4200
4201         if (reducing_redundancy) {
4202                 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4203                         btrfs_info(fs_info,
4204                            "balance: force reducing metadata redundancy");
4205                 } else {
4206                         btrfs_err(fs_info,
4207         "balance: reduces metadata redundancy, use --force if you want this");
4208                         ret = -EINVAL;
4209                         goto out;
4210                 }
4211         }
4212
4213         if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4214                 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4215                 btrfs_warn(fs_info,
4216         "balance: metadata profile %s has lower redundancy than data profile %s",
4217                                 btrfs_bg_type_to_raid_name(meta_target),
4218                                 btrfs_bg_type_to_raid_name(data_target));
4219         }
4220
4221         if (fs_info->send_in_progress) {
4222                 btrfs_warn_rl(fs_info,
4223 "cannot run balance while send operations are in progress (%d in progress)",
4224                               fs_info->send_in_progress);
4225                 ret = -EAGAIN;
4226                 goto out;
4227         }
4228
4229         ret = insert_balance_item(fs_info, bctl);
4230         if (ret && ret != -EEXIST)
4231                 goto out;
4232
4233         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4234                 BUG_ON(ret == -EEXIST);
4235                 BUG_ON(fs_info->balance_ctl);
4236                 spin_lock(&fs_info->balance_lock);
4237                 fs_info->balance_ctl = bctl;
4238                 spin_unlock(&fs_info->balance_lock);
4239         } else {
4240                 BUG_ON(ret != -EEXIST);
4241                 spin_lock(&fs_info->balance_lock);
4242                 update_balance_args(bctl);
4243                 spin_unlock(&fs_info->balance_lock);
4244         }
4245
4246         ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4247         set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4248         describe_balance_start_or_resume(fs_info);
4249         mutex_unlock(&fs_info->balance_mutex);
4250
4251         ret = __btrfs_balance(fs_info);
4252
4253         mutex_lock(&fs_info->balance_mutex);
4254         if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4255                 btrfs_info(fs_info, "balance: paused");
4256         /*
4257          * Balance can be canceled by:
4258          *
4259          * - Regular cancel request
4260          *   Then ret == -ECANCELED and balance_cancel_req > 0
4261          *
4262          * - Fatal signal to "btrfs" process
4263          *   Either the signal caught by wait_reserve_ticket() and callers
4264          *   got -EINTR, or caught by btrfs_should_cancel_balance() and
4265          *   got -ECANCELED.
4266          *   Either way, in this case balance_cancel_req = 0, and
4267          *   ret == -EINTR or ret == -ECANCELED.
4268          *
4269          * So here we only check the return value to catch canceled balance.
4270          */
4271         else if (ret == -ECANCELED || ret == -EINTR)
4272                 btrfs_info(fs_info, "balance: canceled");
4273         else
4274                 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4275
4276         clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4277
4278         if (bargs) {
4279                 memset(bargs, 0, sizeof(*bargs));
4280                 btrfs_update_ioctl_balance_args(fs_info, bargs);
4281         }
4282
4283         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4284             balance_need_close(fs_info)) {
4285                 reset_balance_state(fs_info);
4286                 btrfs_exclop_finish(fs_info);
4287         }
4288
4289         wake_up(&fs_info->balance_wait_q);
4290
4291         return ret;
4292 out:
4293         if (bctl->flags & BTRFS_BALANCE_RESUME)
4294                 reset_balance_state(fs_info);
4295         else
4296                 kfree(bctl);
4297         btrfs_exclop_finish(fs_info);
4298
4299         return ret;
4300 }
4301
4302 static int balance_kthread(void *data)
4303 {
4304         struct btrfs_fs_info *fs_info = data;
4305         int ret = 0;
4306
4307         sb_start_write(fs_info->sb);
4308         mutex_lock(&fs_info->balance_mutex);
4309         if (fs_info->balance_ctl)
4310                 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4311         mutex_unlock(&fs_info->balance_mutex);
4312         sb_end_write(fs_info->sb);
4313
4314         return ret;
4315 }
4316
4317 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4318 {
4319         struct task_struct *tsk;
4320
4321         mutex_lock(&fs_info->balance_mutex);
4322         if (!fs_info->balance_ctl) {
4323                 mutex_unlock(&fs_info->balance_mutex);
4324                 return 0;
4325         }
4326         mutex_unlock(&fs_info->balance_mutex);
4327
4328         if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4329                 btrfs_info(fs_info, "balance: resume skipped");
4330                 return 0;
4331         }
4332
4333         /*
4334          * A ro->rw remount sequence should continue with the paused balance
4335          * regardless of who pauses it, system or the user as of now, so set
4336          * the resume flag.
4337          */
4338         spin_lock(&fs_info->balance_lock);
4339         fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4340         spin_unlock(&fs_info->balance_lock);
4341
4342         tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4343         return PTR_ERR_OR_ZERO(tsk);
4344 }
4345
4346 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4347 {
4348         struct btrfs_balance_control *bctl;
4349         struct btrfs_balance_item *item;
4350         struct btrfs_disk_balance_args disk_bargs;
4351         struct btrfs_path *path;
4352         struct extent_buffer *leaf;
4353         struct btrfs_key key;
4354         int ret;
4355
4356         path = btrfs_alloc_path();
4357         if (!path)
4358                 return -ENOMEM;
4359
4360         key.objectid = BTRFS_BALANCE_OBJECTID;
4361         key.type = BTRFS_TEMPORARY_ITEM_KEY;
4362         key.offset = 0;
4363
4364         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4365         if (ret < 0)
4366                 goto out;
4367         if (ret > 0) { /* ret = -ENOENT; */
4368                 ret = 0;
4369                 goto out;
4370         }
4371
4372         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4373         if (!bctl) {
4374                 ret = -ENOMEM;
4375                 goto out;
4376         }
4377
4378         leaf = path->nodes[0];
4379         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4380
4381         bctl->flags = btrfs_balance_flags(leaf, item);
4382         bctl->flags |= BTRFS_BALANCE_RESUME;
4383
4384         btrfs_balance_data(leaf, item, &disk_bargs);
4385         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4386         btrfs_balance_meta(leaf, item, &disk_bargs);
4387         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4388         btrfs_balance_sys(leaf, item, &disk_bargs);
4389         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4390
4391         /*
4392          * This should never happen, as the paused balance state is recovered
4393          * during mount without any chance of other exclusive ops to collide.
4394          *
4395          * This gives the exclusive op status to balance and keeps in paused
4396          * state until user intervention (cancel or umount). If the ownership
4397          * cannot be assigned, show a message but do not fail. The balance
4398          * is in a paused state and must have fs_info::balance_ctl properly
4399          * set up.
4400          */
4401         if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4402                 btrfs_warn(fs_info,
4403         "balance: cannot set exclusive op status, resume manually");
4404
4405         btrfs_release_path(path);
4406
4407         mutex_lock(&fs_info->balance_mutex);
4408         BUG_ON(fs_info->balance_ctl);
4409         spin_lock(&fs_info->balance_lock);
4410         fs_info->balance_ctl = bctl;
4411         spin_unlock(&fs_info->balance_lock);
4412         mutex_unlock(&fs_info->balance_mutex);
4413 out:
4414         btrfs_free_path(path);
4415         return ret;
4416 }
4417
4418 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4419 {
4420         int ret = 0;
4421
4422         mutex_lock(&fs_info->balance_mutex);
4423         if (!fs_info->balance_ctl) {
4424                 mutex_unlock(&fs_info->balance_mutex);
4425                 return -ENOTCONN;
4426         }
4427
4428         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4429                 atomic_inc(&fs_info->balance_pause_req);
4430                 mutex_unlock(&fs_info->balance_mutex);
4431
4432                 wait_event(fs_info->balance_wait_q,
4433                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4434
4435                 mutex_lock(&fs_info->balance_mutex);
4436                 /* we are good with balance_ctl ripped off from under us */
4437                 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4438                 atomic_dec(&fs_info->balance_pause_req);
4439         } else {
4440                 ret = -ENOTCONN;
4441         }
4442
4443         mutex_unlock(&fs_info->balance_mutex);
4444         return ret;
4445 }
4446
4447 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4448 {
4449         mutex_lock(&fs_info->balance_mutex);
4450         if (!fs_info->balance_ctl) {
4451                 mutex_unlock(&fs_info->balance_mutex);
4452                 return -ENOTCONN;
4453         }
4454
4455         /*
4456          * A paused balance with the item stored on disk can be resumed at
4457          * mount time if the mount is read-write. Otherwise it's still paused
4458          * and we must not allow cancelling as it deletes the item.
4459          */
4460         if (sb_rdonly(fs_info->sb)) {
4461                 mutex_unlock(&fs_info->balance_mutex);
4462                 return -EROFS;
4463         }
4464
4465         atomic_inc(&fs_info->balance_cancel_req);
4466         /*
4467          * if we are running just wait and return, balance item is
4468          * deleted in btrfs_balance in this case
4469          */
4470         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4471                 mutex_unlock(&fs_info->balance_mutex);
4472                 wait_event(fs_info->balance_wait_q,
4473                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4474                 mutex_lock(&fs_info->balance_mutex);
4475         } else {
4476                 mutex_unlock(&fs_info->balance_mutex);
4477                 /*
4478                  * Lock released to allow other waiters to continue, we'll
4479                  * reexamine the status again.
4480                  */
4481                 mutex_lock(&fs_info->balance_mutex);
4482
4483                 if (fs_info->balance_ctl) {
4484                         reset_balance_state(fs_info);
4485                         btrfs_exclop_finish(fs_info);
4486                         btrfs_info(fs_info, "balance: canceled");
4487                 }
4488         }
4489
4490         ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4491         atomic_dec(&fs_info->balance_cancel_req);
4492         mutex_unlock(&fs_info->balance_mutex);
4493         return 0;
4494 }
4495
4496 int btrfs_uuid_scan_kthread(void *data)
4497 {
4498         struct btrfs_fs_info *fs_info = data;
4499         struct btrfs_root *root = fs_info->tree_root;
4500         struct btrfs_key key;
4501         struct btrfs_path *path = NULL;
4502         int ret = 0;
4503         struct extent_buffer *eb;
4504         int slot;
4505         struct btrfs_root_item root_item;
4506         u32 item_size;
4507         struct btrfs_trans_handle *trans = NULL;
4508         bool closing = false;
4509
4510         path = btrfs_alloc_path();
4511         if (!path) {
4512                 ret = -ENOMEM;
4513                 goto out;
4514         }
4515
4516         key.objectid = 0;
4517         key.type = BTRFS_ROOT_ITEM_KEY;
4518         key.offset = 0;
4519
4520         while (1) {
4521                 if (btrfs_fs_closing(fs_info)) {
4522                         closing = true;
4523                         break;
4524                 }
4525                 ret = btrfs_search_forward(root, &key, path,
4526                                 BTRFS_OLDEST_GENERATION);
4527                 if (ret) {
4528                         if (ret > 0)
4529                                 ret = 0;
4530                         break;
4531                 }
4532
4533                 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4534                     (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4535                      key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4536                     key.objectid > BTRFS_LAST_FREE_OBJECTID)
4537                         goto skip;
4538
4539                 eb = path->nodes[0];
4540                 slot = path->slots[0];
4541                 item_size = btrfs_item_size_nr(eb, slot);
4542                 if (item_size < sizeof(root_item))
4543                         goto skip;
4544
4545                 read_extent_buffer(eb, &root_item,
4546                                    btrfs_item_ptr_offset(eb, slot),
4547                                    (int)sizeof(root_item));
4548                 if (btrfs_root_refs(&root_item) == 0)
4549                         goto skip;
4550
4551                 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4552                     !btrfs_is_empty_uuid(root_item.received_uuid)) {
4553                         if (trans)
4554                                 goto update_tree;
4555
4556                         btrfs_release_path(path);
4557                         /*
4558                          * 1 - subvol uuid item
4559                          * 1 - received_subvol uuid item
4560                          */
4561                         trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4562                         if (IS_ERR(trans)) {
4563                                 ret = PTR_ERR(trans);
4564                                 break;
4565                         }
4566                         continue;
4567                 } else {
4568                         goto skip;
4569                 }
4570 update_tree:
4571                 btrfs_release_path(path);
4572                 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4573                         ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4574                                                   BTRFS_UUID_KEY_SUBVOL,
4575                                                   key.objectid);
4576                         if (ret < 0) {
4577                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4578                                         ret);
4579                                 break;
4580                         }
4581                 }
4582
4583                 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4584                         ret = btrfs_uuid_tree_add(trans,
4585                                                   root_item.received_uuid,
4586                                                  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4587                                                   key.objectid);
4588                         if (ret < 0) {
4589                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4590                                         ret);
4591                                 break;
4592                         }
4593                 }
4594
4595 skip:
4596                 btrfs_release_path(path);
4597                 if (trans) {
4598                         ret = btrfs_end_transaction(trans);
4599                         trans = NULL;
4600                         if (ret)
4601                                 break;
4602                 }
4603
4604                 if (key.offset < (u64)-1) {
4605                         key.offset++;
4606                 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4607                         key.offset = 0;
4608                         key.type = BTRFS_ROOT_ITEM_KEY;
4609                 } else if (key.objectid < (u64)-1) {
4610                         key.offset = 0;
4611                         key.type = BTRFS_ROOT_ITEM_KEY;
4612                         key.objectid++;
4613                 } else {
4614                         break;
4615                 }
4616                 cond_resched();
4617         }
4618
4619 out:
4620         btrfs_free_path(path);
4621         if (trans && !IS_ERR(trans))
4622                 btrfs_end_transaction(trans);
4623         if (ret)
4624                 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4625         else if (!closing)
4626                 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4627         up(&fs_info->uuid_tree_rescan_sem);
4628         return 0;
4629 }
4630
4631 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4632 {
4633         struct btrfs_trans_handle *trans;
4634         struct btrfs_root *tree_root = fs_info->tree_root;
4635         struct btrfs_root *uuid_root;
4636         struct task_struct *task;
4637         int ret;
4638
4639         /*
4640          * 1 - root node
4641          * 1 - root item
4642          */
4643         trans = btrfs_start_transaction(tree_root, 2);
4644         if (IS_ERR(trans))
4645                 return PTR_ERR(trans);
4646
4647         uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4648         if (IS_ERR(uuid_root)) {
4649                 ret = PTR_ERR(uuid_root);
4650                 btrfs_abort_transaction(trans, ret);
4651                 btrfs_end_transaction(trans);
4652                 return ret;
4653         }
4654
4655         fs_info->uuid_root = uuid_root;
4656
4657         ret = btrfs_commit_transaction(trans);
4658         if (ret)
4659                 return ret;
4660
4661         down(&fs_info->uuid_tree_rescan_sem);
4662         task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4663         if (IS_ERR(task)) {
4664                 /* fs_info->update_uuid_tree_gen remains 0 in all error case */
4665                 btrfs_warn(fs_info, "failed to start uuid_scan task");
4666                 up(&fs_info->uuid_tree_rescan_sem);
4667                 return PTR_ERR(task);
4668         }
4669
4670         return 0;
4671 }
4672
4673 /*
4674  * shrinking a device means finding all of the device extents past
4675  * the new size, and then following the back refs to the chunks.
4676  * The chunk relocation code actually frees the device extent
4677  */
4678 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4679 {
4680         struct btrfs_fs_info *fs_info = device->fs_info;
4681         struct btrfs_root *root = fs_info->dev_root;
4682         struct btrfs_trans_handle *trans;
4683         struct btrfs_dev_extent *dev_extent = NULL;
4684         struct btrfs_path *path;
4685         u64 length;
4686         u64 chunk_offset;
4687         int ret;
4688         int slot;
4689         int failed = 0;
4690         bool retried = false;
4691         struct extent_buffer *l;
4692         struct btrfs_key key;
4693         struct btrfs_super_block *super_copy = fs_info->super_copy;
4694         u64 old_total = btrfs_super_total_bytes(super_copy);
4695         u64 old_size = btrfs_device_get_total_bytes(device);
4696         u64 diff;
4697         u64 start;
4698
4699         new_size = round_down(new_size, fs_info->sectorsize);
4700         start = new_size;
4701         diff = round_down(old_size - new_size, fs_info->sectorsize);
4702
4703         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4704                 return -EINVAL;
4705
4706         path = btrfs_alloc_path();
4707         if (!path)
4708                 return -ENOMEM;
4709
4710         path->reada = READA_BACK;
4711
4712         trans = btrfs_start_transaction(root, 0);
4713         if (IS_ERR(trans)) {
4714                 btrfs_free_path(path);
4715                 return PTR_ERR(trans);
4716         }
4717
4718         mutex_lock(&fs_info->chunk_mutex);
4719
4720         btrfs_device_set_total_bytes(device, new_size);
4721         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4722                 device->fs_devices->total_rw_bytes -= diff;
4723                 atomic64_sub(diff, &fs_info->free_chunk_space);
4724         }
4725
4726         /*
4727          * Once the device's size has been set to the new size, ensure all
4728          * in-memory chunks are synced to disk so that the loop below sees them
4729          * and relocates them accordingly.
4730          */
4731         if (contains_pending_extent(device, &start, diff)) {
4732                 mutex_unlock(&fs_info->chunk_mutex);
4733                 ret = btrfs_commit_transaction(trans);
4734                 if (ret)
4735                         goto done;
4736         } else {
4737                 mutex_unlock(&fs_info->chunk_mutex);
4738                 btrfs_end_transaction(trans);
4739         }
4740
4741 again:
4742         key.objectid = device->devid;
4743         key.offset = (u64)-1;
4744         key.type = BTRFS_DEV_EXTENT_KEY;
4745
4746         do {
4747                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
4748                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4749                 if (ret < 0) {
4750                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4751                         goto done;
4752                 }
4753
4754                 ret = btrfs_previous_item(root, path, 0, key.type);
4755                 if (ret)
4756                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4757                 if (ret < 0)
4758                         goto done;
4759                 if (ret) {
4760                         ret = 0;
4761                         btrfs_release_path(path);
4762                         break;
4763                 }
4764
4765                 l = path->nodes[0];
4766                 slot = path->slots[0];
4767                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4768
4769                 if (key.objectid != device->devid) {
4770                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4771                         btrfs_release_path(path);
4772                         break;
4773                 }
4774
4775                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4776                 length = btrfs_dev_extent_length(l, dev_extent);
4777
4778                 if (key.offset + length <= new_size) {
4779                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4780                         btrfs_release_path(path);
4781                         break;
4782                 }
4783
4784                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4785                 btrfs_release_path(path);
4786
4787                 /*
4788                  * We may be relocating the only data chunk we have,
4789                  * which could potentially end up with losing data's
4790                  * raid profile, so lets allocate an empty one in
4791                  * advance.
4792                  */
4793                 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4794                 if (ret < 0) {
4795                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4796                         goto done;
4797                 }
4798
4799                 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4800                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4801                 if (ret == -ENOSPC) {
4802                         failed++;
4803                 } else if (ret) {
4804                         if (ret == -ETXTBSY) {
4805                                 btrfs_warn(fs_info,
4806                    "could not shrink block group %llu due to active swapfile",
4807                                            chunk_offset);
4808                         }
4809                         goto done;
4810                 }
4811         } while (key.offset-- > 0);
4812
4813         if (failed && !retried) {
4814                 failed = 0;
4815                 retried = true;
4816                 goto again;
4817         } else if (failed && retried) {
4818                 ret = -ENOSPC;
4819                 goto done;
4820         }
4821
4822         /* Shrinking succeeded, else we would be at "done". */
4823         trans = btrfs_start_transaction(root, 0);
4824         if (IS_ERR(trans)) {
4825                 ret = PTR_ERR(trans);
4826                 goto done;
4827         }
4828
4829         mutex_lock(&fs_info->chunk_mutex);
4830         /* Clear all state bits beyond the shrunk device size */
4831         clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4832                           CHUNK_STATE_MASK);
4833
4834         btrfs_device_set_disk_total_bytes(device, new_size);
4835         if (list_empty(&device->post_commit_list))
4836                 list_add_tail(&device->post_commit_list,
4837                               &trans->transaction->dev_update_list);
4838
4839         WARN_ON(diff > old_total);
4840         btrfs_set_super_total_bytes(super_copy,
4841                         round_down(old_total - diff, fs_info->sectorsize));
4842         mutex_unlock(&fs_info->chunk_mutex);
4843
4844         /* Now btrfs_update_device() will change the on-disk size. */
4845         ret = btrfs_update_device(trans, device);
4846         if (ret < 0) {
4847                 btrfs_abort_transaction(trans, ret);
4848                 btrfs_end_transaction(trans);
4849         } else {
4850                 ret = btrfs_commit_transaction(trans);
4851         }
4852 done:
4853         btrfs_free_path(path);
4854         if (ret) {
4855                 mutex_lock(&fs_info->chunk_mutex);
4856                 btrfs_device_set_total_bytes(device, old_size);
4857                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4858                         device->fs_devices->total_rw_bytes += diff;
4859                 atomic64_add(diff, &fs_info->free_chunk_space);
4860                 mutex_unlock(&fs_info->chunk_mutex);
4861         }
4862         return ret;
4863 }
4864
4865 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4866                            struct btrfs_key *key,
4867                            struct btrfs_chunk *chunk, int item_size)
4868 {
4869         struct btrfs_super_block *super_copy = fs_info->super_copy;
4870         struct btrfs_disk_key disk_key;
4871         u32 array_size;
4872         u8 *ptr;
4873
4874         mutex_lock(&fs_info->chunk_mutex);
4875         array_size = btrfs_super_sys_array_size(super_copy);
4876         if (array_size + item_size + sizeof(disk_key)
4877                         > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4878                 mutex_unlock(&fs_info->chunk_mutex);
4879                 return -EFBIG;
4880         }
4881
4882         ptr = super_copy->sys_chunk_array + array_size;
4883         btrfs_cpu_key_to_disk(&disk_key, key);
4884         memcpy(ptr, &disk_key, sizeof(disk_key));
4885         ptr += sizeof(disk_key);
4886         memcpy(ptr, chunk, item_size);
4887         item_size += sizeof(disk_key);
4888         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4889         mutex_unlock(&fs_info->chunk_mutex);
4890
4891         return 0;
4892 }
4893
4894 /*
4895  * sort the devices in descending order by max_avail, total_avail
4896  */
4897 static int btrfs_cmp_device_info(const void *a, const void *b)
4898 {
4899         const struct btrfs_device_info *di_a = a;
4900         const struct btrfs_device_info *di_b = b;
4901
4902         if (di_a->max_avail > di_b->max_avail)
4903                 return -1;
4904         if (di_a->max_avail < di_b->max_avail)
4905                 return 1;
4906         if (di_a->total_avail > di_b->total_avail)
4907                 return -1;
4908         if (di_a->total_avail < di_b->total_avail)
4909                 return 1;
4910         return 0;
4911 }
4912
4913 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4914 {
4915         if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4916                 return;
4917
4918         btrfs_set_fs_incompat(info, RAID56);
4919 }
4920
4921 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
4922 {
4923         if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
4924                 return;
4925
4926         btrfs_set_fs_incompat(info, RAID1C34);
4927 }
4928
4929 /*
4930  * Structure used internally for __btrfs_alloc_chunk() function.
4931  * Wraps needed parameters.
4932  */
4933 struct alloc_chunk_ctl {
4934         u64 start;
4935         u64 type;
4936         /* Total number of stripes to allocate */
4937         int num_stripes;
4938         /* sub_stripes info for map */
4939         int sub_stripes;
4940         /* Stripes per device */
4941         int dev_stripes;
4942         /* Maximum number of devices to use */
4943         int devs_max;
4944         /* Minimum number of devices to use */
4945         int devs_min;
4946         /* ndevs has to be a multiple of this */
4947         int devs_increment;
4948         /* Number of copies */
4949         int ncopies;
4950         /* Number of stripes worth of bytes to store parity information */
4951         int nparity;
4952         u64 max_stripe_size;
4953         u64 max_chunk_size;
4954         u64 dev_extent_min;
4955         u64 stripe_size;
4956         u64 chunk_size;
4957         int ndevs;
4958 };
4959
4960 static void init_alloc_chunk_ctl_policy_regular(
4961                                 struct btrfs_fs_devices *fs_devices,
4962                                 struct alloc_chunk_ctl *ctl)
4963 {
4964         u64 type = ctl->type;
4965
4966         if (type & BTRFS_BLOCK_GROUP_DATA) {
4967                 ctl->max_stripe_size = SZ_1G;
4968                 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4969         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4970                 /* For larger filesystems, use larger metadata chunks */
4971                 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4972                         ctl->max_stripe_size = SZ_1G;
4973                 else
4974                         ctl->max_stripe_size = SZ_256M;
4975                 ctl->max_chunk_size = ctl->max_stripe_size;
4976         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4977                 ctl->max_stripe_size = SZ_32M;
4978                 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
4979                 ctl->devs_max = min_t(int, ctl->devs_max,
4980                                       BTRFS_MAX_DEVS_SYS_CHUNK);
4981         } else {
4982                 BUG();
4983         }
4984
4985         /* We don't want a chunk larger than 10% of writable space */
4986         ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4987                                   ctl->max_chunk_size);
4988         ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
4989 }
4990
4991 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
4992                                  struct alloc_chunk_ctl *ctl)
4993 {
4994         int index = btrfs_bg_flags_to_raid_index(ctl->type);
4995
4996         ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
4997         ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
4998         ctl->devs_max = btrfs_raid_array[index].devs_max;
4999         if (!ctl->devs_max)
5000                 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5001         ctl->devs_min = btrfs_raid_array[index].devs_min;
5002         ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5003         ctl->ncopies = btrfs_raid_array[index].ncopies;
5004         ctl->nparity = btrfs_raid_array[index].nparity;
5005         ctl->ndevs = 0;
5006
5007         switch (fs_devices->chunk_alloc_policy) {
5008         case BTRFS_CHUNK_ALLOC_REGULAR:
5009                 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5010                 break;
5011         default:
5012                 BUG();
5013         }
5014 }
5015
5016 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5017                               struct alloc_chunk_ctl *ctl,
5018                               struct btrfs_device_info *devices_info)
5019 {
5020         struct btrfs_fs_info *info = fs_devices->fs_info;
5021         struct btrfs_device *device;
5022         u64 total_avail;
5023         u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5024         int ret;
5025         int ndevs = 0;
5026         u64 max_avail;
5027         u64 dev_offset;
5028
5029         /*
5030          * in the first pass through the devices list, we gather information
5031          * about the available holes on each device.
5032          */
5033         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5034                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5035                         WARN(1, KERN_ERR
5036                                "BTRFS: read-only device in alloc_list\n");
5037                         continue;
5038                 }
5039
5040                 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5041                                         &device->dev_state) ||
5042                     test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5043                         continue;
5044
5045                 if (device->total_bytes > device->bytes_used)
5046                         total_avail = device->total_bytes - device->bytes_used;
5047                 else
5048                         total_avail = 0;
5049
5050                 /* If there is no space on this device, skip it. */
5051                 if (total_avail < ctl->dev_extent_min)
5052                         continue;
5053
5054                 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5055                                            &max_avail);
5056                 if (ret && ret != -ENOSPC)
5057                         return ret;
5058
5059                 if (ret == 0)
5060                         max_avail = dev_extent_want;
5061
5062                 if (max_avail < ctl->dev_extent_min) {
5063                         if (btrfs_test_opt(info, ENOSPC_DEBUG))
5064                                 btrfs_debug(info,
5065                         "%s: devid %llu has no free space, have=%llu want=%llu",
5066                                             __func__, device->devid, max_avail,
5067                                             ctl->dev_extent_min);
5068                         continue;
5069                 }
5070
5071                 if (ndevs == fs_devices->rw_devices) {
5072                         WARN(1, "%s: found more than %llu devices\n",
5073                              __func__, fs_devices->rw_devices);
5074                         break;
5075                 }
5076                 devices_info[ndevs].dev_offset = dev_offset;
5077                 devices_info[ndevs].max_avail = max_avail;
5078                 devices_info[ndevs].total_avail = total_avail;
5079                 devices_info[ndevs].dev = device;
5080                 ++ndevs;
5081         }
5082         ctl->ndevs = ndevs;
5083
5084         /*
5085          * now sort the devices by hole size / available space
5086          */
5087         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5088              btrfs_cmp_device_info, NULL);
5089
5090         return 0;
5091 }
5092
5093 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5094                                       struct btrfs_device_info *devices_info)
5095 {
5096         /* Number of stripes that count for block group size */
5097         int data_stripes;
5098
5099         /*
5100          * The primary goal is to maximize the number of stripes, so use as
5101          * many devices as possible, even if the stripes are not maximum sized.
5102          *
5103          * The DUP profile stores more than one stripe per device, the
5104          * max_avail is the total size so we have to adjust.
5105          */
5106         ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5107                                    ctl->dev_stripes);
5108         ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5109
5110         /* This will have to be fixed for RAID1 and RAID10 over more drives */
5111         data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5112
5113         /*
5114          * Use the number of data stripes to figure out how big this chunk is
5115          * really going to be in terms of logical address space, and compare
5116          * that answer with the max chunk size. If it's higher, we try to
5117          * reduce stripe_size.
5118          */
5119         if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5120                 /*
5121                  * Reduce stripe_size, round it up to a 16MB boundary again and
5122                  * then use it, unless it ends up being even bigger than the
5123                  * previous value we had already.
5124                  */
5125                 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5126                                                         data_stripes), SZ_16M),
5127                                        ctl->stripe_size);
5128         }
5129
5130         /* Align to BTRFS_STRIPE_LEN */
5131         ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5132         ctl->chunk_size = ctl->stripe_size * data_stripes;
5133
5134         return 0;
5135 }
5136
5137 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5138                               struct alloc_chunk_ctl *ctl,
5139                               struct btrfs_device_info *devices_info)
5140 {
5141         struct btrfs_fs_info *info = fs_devices->fs_info;
5142
5143         /*
5144          * Round down to number of usable stripes, devs_increment can be any
5145          * number so we can't use round_down() that requires power of 2, while
5146          * rounddown is safe.
5147          */
5148         ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5149
5150         if (ctl->ndevs < ctl->devs_min) {
5151                 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5152                         btrfs_debug(info,
5153         "%s: not enough devices with free space: have=%d minimum required=%d",
5154                                     __func__, ctl->ndevs, ctl->devs_min);
5155                 }
5156                 return -ENOSPC;
5157         }
5158
5159         ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5160
5161         switch (fs_devices->chunk_alloc_policy) {
5162         case BTRFS_CHUNK_ALLOC_REGULAR:
5163                 return decide_stripe_size_regular(ctl, devices_info);
5164         default:
5165                 BUG();
5166         }
5167 }
5168
5169 static int create_chunk(struct btrfs_trans_handle *trans,
5170                         struct alloc_chunk_ctl *ctl,
5171                         struct btrfs_device_info *devices_info)
5172 {
5173         struct btrfs_fs_info *info = trans->fs_info;
5174         struct map_lookup *map = NULL;
5175         struct extent_map_tree *em_tree;
5176         struct extent_map *em;
5177         u64 start = ctl->start;
5178         u64 type = ctl->type;
5179         int ret;
5180         int i;
5181         int j;
5182
5183         map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5184         if (!map)
5185                 return -ENOMEM;
5186         map->num_stripes = ctl->num_stripes;
5187
5188         for (i = 0; i < ctl->ndevs; ++i) {
5189                 for (j = 0; j < ctl->dev_stripes; ++j) {
5190                         int s = i * ctl->dev_stripes + j;
5191                         map->stripes[s].dev = devices_info[i].dev;
5192                         map->stripes[s].physical = devices_info[i].dev_offset +
5193                                                    j * ctl->stripe_size;
5194                 }
5195         }
5196         map->stripe_len = BTRFS_STRIPE_LEN;
5197         map->io_align = BTRFS_STRIPE_LEN;
5198         map->io_width = BTRFS_STRIPE_LEN;
5199         map->type = type;
5200         map->sub_stripes = ctl->sub_stripes;
5201
5202         trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5203
5204         em = alloc_extent_map();
5205         if (!em) {
5206                 kfree(map);
5207                 return -ENOMEM;
5208         }
5209         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5210         em->map_lookup = map;
5211         em->start = start;
5212         em->len = ctl->chunk_size;
5213         em->block_start = 0;
5214         em->block_len = em->len;
5215         em->orig_block_len = ctl->stripe_size;
5216
5217         em_tree = &info->mapping_tree;
5218         write_lock(&em_tree->lock);
5219         ret = add_extent_mapping(em_tree, em, 0);
5220         if (ret) {
5221                 write_unlock(&em_tree->lock);
5222                 free_extent_map(em);
5223                 return ret;
5224         }
5225         write_unlock(&em_tree->lock);
5226
5227         ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5228         if (ret)
5229                 goto error_del_extent;
5230
5231         for (i = 0; i < map->num_stripes; i++) {
5232                 struct btrfs_device *dev = map->stripes[i].dev;
5233
5234                 btrfs_device_set_bytes_used(dev,
5235                                             dev->bytes_used + ctl->stripe_size);
5236                 if (list_empty(&dev->post_commit_list))
5237                         list_add_tail(&dev->post_commit_list,
5238                                       &trans->transaction->dev_update_list);
5239         }
5240
5241         atomic64_sub(ctl->stripe_size * map->num_stripes,
5242                      &info->free_chunk_space);
5243
5244         free_extent_map(em);
5245         check_raid56_incompat_flag(info, type);
5246         check_raid1c34_incompat_flag(info, type);
5247
5248         return 0;
5249
5250 error_del_extent:
5251         write_lock(&em_tree->lock);
5252         remove_extent_mapping(em_tree, em);
5253         write_unlock(&em_tree->lock);
5254
5255         /* One for our allocation */
5256         free_extent_map(em);
5257         /* One for the tree reference */
5258         free_extent_map(em);
5259
5260         return ret;
5261 }
5262
5263 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5264 {
5265         struct btrfs_fs_info *info = trans->fs_info;
5266         struct btrfs_fs_devices *fs_devices = info->fs_devices;
5267         struct btrfs_device_info *devices_info = NULL;
5268         struct alloc_chunk_ctl ctl;
5269         int ret;
5270
5271         lockdep_assert_held(&info->chunk_mutex);
5272
5273         if (!alloc_profile_is_valid(type, 0)) {
5274                 ASSERT(0);
5275                 return -EINVAL;
5276         }
5277
5278         if (list_empty(&fs_devices->alloc_list)) {
5279                 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5280                         btrfs_debug(info, "%s: no writable device", __func__);
5281                 return -ENOSPC;
5282         }
5283
5284         if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5285                 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5286                 ASSERT(0);
5287                 return -EINVAL;
5288         }
5289
5290         ctl.start = find_next_chunk(info);
5291         ctl.type = type;
5292         init_alloc_chunk_ctl(fs_devices, &ctl);
5293
5294         devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5295                                GFP_NOFS);
5296         if (!devices_info)
5297                 return -ENOMEM;
5298
5299         ret = gather_device_info(fs_devices, &ctl, devices_info);
5300         if (ret < 0)
5301                 goto out;
5302
5303         ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5304         if (ret < 0)
5305                 goto out;
5306
5307         ret = create_chunk(trans, &ctl, devices_info);
5308
5309 out:
5310         kfree(devices_info);
5311         return ret;
5312 }
5313
5314 /*
5315  * Chunk allocation falls into two parts. The first part does work
5316  * that makes the new allocated chunk usable, but does not do any operation
5317  * that modifies the chunk tree. The second part does the work that
5318  * requires modifying the chunk tree. This division is important for the
5319  * bootstrap process of adding storage to a seed btrfs.
5320  */
5321 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5322                              u64 chunk_offset, u64 chunk_size)
5323 {
5324         struct btrfs_fs_info *fs_info = trans->fs_info;
5325         struct btrfs_root *extent_root = fs_info->extent_root;
5326         struct btrfs_root *chunk_root = fs_info->chunk_root;
5327         struct btrfs_key key;
5328         struct btrfs_device *device;
5329         struct btrfs_chunk *chunk;
5330         struct btrfs_stripe *stripe;
5331         struct extent_map *em;
5332         struct map_lookup *map;
5333         size_t item_size;
5334         u64 dev_offset;
5335         u64 stripe_size;
5336         int i = 0;
5337         int ret = 0;
5338
5339         em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
5340         if (IS_ERR(em))
5341                 return PTR_ERR(em);
5342
5343         map = em->map_lookup;
5344         item_size = btrfs_chunk_item_size(map->num_stripes);
5345         stripe_size = em->orig_block_len;
5346
5347         chunk = kzalloc(item_size, GFP_NOFS);
5348         if (!chunk) {
5349                 ret = -ENOMEM;
5350                 goto out;
5351         }
5352
5353         /*
5354          * Take the device list mutex to prevent races with the final phase of
5355          * a device replace operation that replaces the device object associated
5356          * with the map's stripes, because the device object's id can change
5357          * at any time during that final phase of the device replace operation
5358          * (dev-replace.c:btrfs_dev_replace_finishing()).
5359          */
5360         mutex_lock(&fs_info->fs_devices->device_list_mutex);
5361         for (i = 0; i < map->num_stripes; i++) {
5362                 device = map->stripes[i].dev;
5363                 dev_offset = map->stripes[i].physical;
5364
5365                 ret = btrfs_update_device(trans, device);
5366                 if (ret)
5367                         break;
5368                 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5369                                              dev_offset, stripe_size);
5370                 if (ret)
5371                         break;
5372         }
5373         if (ret) {
5374                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5375                 goto out;
5376         }
5377
5378         stripe = &chunk->stripe;
5379         for (i = 0; i < map->num_stripes; i++) {
5380                 device = map->stripes[i].dev;
5381                 dev_offset = map->stripes[i].physical;
5382
5383                 btrfs_set_stack_stripe_devid(stripe, device->devid);
5384                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5385                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5386                 stripe++;
5387         }
5388         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5389
5390         btrfs_set_stack_chunk_length(chunk, chunk_size);
5391         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5392         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5393         btrfs_set_stack_chunk_type(chunk, map->type);
5394         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5395         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5396         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5397         btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5398         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5399
5400         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5401         key.type = BTRFS_CHUNK_ITEM_KEY;
5402         key.offset = chunk_offset;
5403
5404         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5405         if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5406                 /*
5407                  * TODO: Cleanup of inserted chunk root in case of
5408                  * failure.
5409                  */
5410                 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5411         }
5412
5413 out:
5414         kfree(chunk);
5415         free_extent_map(em);
5416         return ret;
5417 }
5418
5419 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5420 {
5421         struct btrfs_fs_info *fs_info = trans->fs_info;
5422         u64 alloc_profile;
5423         int ret;
5424
5425         alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5426         ret = btrfs_alloc_chunk(trans, alloc_profile);
5427         if (ret)
5428                 return ret;
5429
5430         alloc_profile = btrfs_system_alloc_profile(fs_info);
5431         ret = btrfs_alloc_chunk(trans, alloc_profile);
5432         return ret;
5433 }
5434
5435 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5436 {
5437         const int index = btrfs_bg_flags_to_raid_index(map->type);
5438
5439         return btrfs_raid_array[index].tolerated_failures;
5440 }
5441
5442 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5443 {
5444         struct extent_map *em;
5445         struct map_lookup *map;
5446         int readonly = 0;
5447         int miss_ndevs = 0;
5448         int i;
5449
5450         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5451         if (IS_ERR(em))
5452                 return 1;
5453
5454         map = em->map_lookup;
5455         for (i = 0; i < map->num_stripes; i++) {
5456                 if (test_bit(BTRFS_DEV_STATE_MISSING,
5457                                         &map->stripes[i].dev->dev_state)) {
5458                         miss_ndevs++;
5459                         continue;
5460                 }
5461                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5462                                         &map->stripes[i].dev->dev_state)) {
5463                         readonly = 1;
5464                         goto end;
5465                 }
5466         }
5467
5468         /*
5469          * If the number of missing devices is larger than max errors,
5470          * we can not write the data into that chunk successfully, so
5471          * set it readonly.
5472          */
5473         if (miss_ndevs > btrfs_chunk_max_errors(map))
5474                 readonly = 1;
5475 end:
5476         free_extent_map(em);
5477         return readonly;
5478 }
5479
5480 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5481 {
5482         struct extent_map *em;
5483
5484         while (1) {
5485                 write_lock(&tree->lock);
5486                 em = lookup_extent_mapping(tree, 0, (u64)-1);
5487                 if (em)
5488                         remove_extent_mapping(tree, em);
5489                 write_unlock(&tree->lock);
5490                 if (!em)
5491                         break;
5492                 /* once for us */
5493                 free_extent_map(em);
5494                 /* once for the tree */
5495                 free_extent_map(em);
5496         }
5497 }
5498
5499 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5500 {
5501         struct extent_map *em;
5502         struct map_lookup *map;
5503         int ret;
5504
5505         em = btrfs_get_chunk_map(fs_info, logical, len);
5506         if (IS_ERR(em))
5507                 /*
5508                  * We could return errors for these cases, but that could get
5509                  * ugly and we'd probably do the same thing which is just not do
5510                  * anything else and exit, so return 1 so the callers don't try
5511                  * to use other copies.
5512                  */
5513                 return 1;
5514
5515         map = em->map_lookup;
5516         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5517                 ret = map->num_stripes;
5518         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5519                 ret = map->sub_stripes;
5520         else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5521                 ret = 2;
5522         else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5523                 /*
5524                  * There could be two corrupted data stripes, we need
5525                  * to loop retry in order to rebuild the correct data.
5526                  *
5527                  * Fail a stripe at a time on every retry except the
5528                  * stripe under reconstruction.
5529                  */
5530                 ret = map->num_stripes;
5531         else
5532                 ret = 1;
5533         free_extent_map(em);
5534
5535         down_read(&fs_info->dev_replace.rwsem);
5536         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5537             fs_info->dev_replace.tgtdev)
5538                 ret++;
5539         up_read(&fs_info->dev_replace.rwsem);
5540
5541         return ret;
5542 }
5543
5544 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5545                                     u64 logical)
5546 {
5547         struct extent_map *em;
5548         struct map_lookup *map;
5549         unsigned long len = fs_info->sectorsize;
5550
5551         em = btrfs_get_chunk_map(fs_info, logical, len);
5552
5553         if (!WARN_ON(IS_ERR(em))) {
5554                 map = em->map_lookup;
5555                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5556                         len = map->stripe_len * nr_data_stripes(map);
5557                 free_extent_map(em);
5558         }
5559         return len;
5560 }
5561
5562 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5563 {
5564         struct extent_map *em;
5565         struct map_lookup *map;
5566         int ret = 0;
5567
5568         em = btrfs_get_chunk_map(fs_info, logical, len);
5569
5570         if(!WARN_ON(IS_ERR(em))) {
5571                 map = em->map_lookup;
5572                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5573                         ret = 1;
5574                 free_extent_map(em);
5575         }
5576         return ret;
5577 }
5578
5579 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5580                             struct map_lookup *map, int first,
5581                             int dev_replace_is_ongoing)
5582 {
5583         int i;
5584         int num_stripes;
5585         int preferred_mirror;
5586         int tolerance;
5587         struct btrfs_device *srcdev;
5588
5589         ASSERT((map->type &
5590                  (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5591
5592         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5593                 num_stripes = map->sub_stripes;
5594         else
5595                 num_stripes = map->num_stripes;
5596
5597         preferred_mirror = first + current->pid % num_stripes;
5598
5599         if (dev_replace_is_ongoing &&
5600             fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5601              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5602                 srcdev = fs_info->dev_replace.srcdev;
5603         else
5604                 srcdev = NULL;
5605
5606         /*
5607          * try to avoid the drive that is the source drive for a
5608          * dev-replace procedure, only choose it if no other non-missing
5609          * mirror is available
5610          */
5611         for (tolerance = 0; tolerance < 2; tolerance++) {
5612                 if (map->stripes[preferred_mirror].dev->bdev &&
5613                     (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5614                         return preferred_mirror;
5615                 for (i = first; i < first + num_stripes; i++) {
5616                         if (map->stripes[i].dev->bdev &&
5617                             (tolerance || map->stripes[i].dev != srcdev))
5618                                 return i;
5619                 }
5620         }
5621
5622         /* we couldn't find one that doesn't fail.  Just return something
5623          * and the io error handling code will clean up eventually
5624          */
5625         return preferred_mirror;
5626 }
5627
5628 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5629 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5630 {
5631         int i;
5632         int again = 1;
5633
5634         while (again) {
5635                 again = 0;
5636                 for (i = 0; i < num_stripes - 1; i++) {
5637                         /* Swap if parity is on a smaller index */
5638                         if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
5639                                 swap(bbio->stripes[i], bbio->stripes[i + 1]);
5640                                 swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
5641                                 again = 1;
5642                         }
5643                 }
5644         }
5645 }
5646
5647 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5648 {
5649         struct btrfs_bio *bbio = kzalloc(
5650                  /* the size of the btrfs_bio */
5651                 sizeof(struct btrfs_bio) +
5652                 /* plus the variable array for the stripes */
5653                 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5654                 /* plus the variable array for the tgt dev */
5655                 sizeof(int) * (real_stripes) +
5656                 /*
5657                  * plus the raid_map, which includes both the tgt dev
5658                  * and the stripes
5659                  */
5660                 sizeof(u64) * (total_stripes),
5661                 GFP_NOFS|__GFP_NOFAIL);
5662
5663         atomic_set(&bbio->error, 0);
5664         refcount_set(&bbio->refs, 1);
5665
5666         bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
5667         bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
5668
5669         return bbio;
5670 }
5671
5672 void btrfs_get_bbio(struct btrfs_bio *bbio)
5673 {
5674         WARN_ON(!refcount_read(&bbio->refs));
5675         refcount_inc(&bbio->refs);
5676 }
5677
5678 void btrfs_put_bbio(struct btrfs_bio *bbio)
5679 {
5680         if (!bbio)
5681                 return;
5682         if (refcount_dec_and_test(&bbio->refs))
5683                 kfree(bbio);
5684 }
5685
5686 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5687 /*
5688  * Please note that, discard won't be sent to target device of device
5689  * replace.
5690  */
5691 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5692                                          u64 logical, u64 *length_ret,
5693                                          struct btrfs_bio **bbio_ret)
5694 {
5695         struct extent_map *em;
5696         struct map_lookup *map;
5697         struct btrfs_bio *bbio;
5698         u64 length = *length_ret;
5699         u64 offset;
5700         u64 stripe_nr;
5701         u64 stripe_nr_end;
5702         u64 stripe_end_offset;
5703         u64 stripe_cnt;
5704         u64 stripe_len;
5705         u64 stripe_offset;
5706         u64 num_stripes;
5707         u32 stripe_index;
5708         u32 factor = 0;
5709         u32 sub_stripes = 0;
5710         u64 stripes_per_dev = 0;
5711         u32 remaining_stripes = 0;
5712         u32 last_stripe = 0;
5713         int ret = 0;
5714         int i;
5715
5716         /* discard always return a bbio */
5717         ASSERT(bbio_ret);
5718
5719         em = btrfs_get_chunk_map(fs_info, logical, length);
5720         if (IS_ERR(em))
5721                 return PTR_ERR(em);
5722
5723         map = em->map_lookup;
5724         /* we don't discard raid56 yet */
5725         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5726                 ret = -EOPNOTSUPP;
5727                 goto out;
5728         }
5729
5730         offset = logical - em->start;
5731         length = min_t(u64, em->start + em->len - logical, length);
5732         *length_ret = length;
5733
5734         stripe_len = map->stripe_len;
5735         /*
5736          * stripe_nr counts the total number of stripes we have to stride
5737          * to get to this block
5738          */
5739         stripe_nr = div64_u64(offset, stripe_len);
5740
5741         /* stripe_offset is the offset of this block in its stripe */
5742         stripe_offset = offset - stripe_nr * stripe_len;
5743
5744         stripe_nr_end = round_up(offset + length, map->stripe_len);
5745         stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5746         stripe_cnt = stripe_nr_end - stripe_nr;
5747         stripe_end_offset = stripe_nr_end * map->stripe_len -
5748                             (offset + length);
5749         /*
5750          * after this, stripe_nr is the number of stripes on this
5751          * device we have to walk to find the data, and stripe_index is
5752          * the number of our device in the stripe array
5753          */
5754         num_stripes = 1;
5755         stripe_index = 0;
5756         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5757                          BTRFS_BLOCK_GROUP_RAID10)) {
5758                 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5759                         sub_stripes = 1;
5760                 else
5761                         sub_stripes = map->sub_stripes;
5762
5763                 factor = map->num_stripes / sub_stripes;
5764                 num_stripes = min_t(u64, map->num_stripes,
5765                                     sub_stripes * stripe_cnt);
5766                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5767                 stripe_index *= sub_stripes;
5768                 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5769                                               &remaining_stripes);
5770                 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5771                 last_stripe *= sub_stripes;
5772         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5773                                 BTRFS_BLOCK_GROUP_DUP)) {
5774                 num_stripes = map->num_stripes;
5775         } else {
5776                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5777                                         &stripe_index);
5778         }
5779
5780         bbio = alloc_btrfs_bio(num_stripes, 0);
5781         if (!bbio) {
5782                 ret = -ENOMEM;
5783                 goto out;
5784         }
5785
5786         for (i = 0; i < num_stripes; i++) {
5787                 bbio->stripes[i].physical =
5788                         map->stripes[stripe_index].physical +
5789                         stripe_offset + stripe_nr * map->stripe_len;
5790                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5791
5792                 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5793                                  BTRFS_BLOCK_GROUP_RAID10)) {
5794                         bbio->stripes[i].length = stripes_per_dev *
5795                                 map->stripe_len;
5796
5797                         if (i / sub_stripes < remaining_stripes)
5798                                 bbio->stripes[i].length +=
5799                                         map->stripe_len;
5800
5801                         /*
5802                          * Special for the first stripe and
5803                          * the last stripe:
5804                          *
5805                          * |-------|...|-------|
5806                          *     |----------|
5807                          *    off     end_off
5808                          */
5809                         if (i < sub_stripes)
5810                                 bbio->stripes[i].length -=
5811                                         stripe_offset;
5812
5813                         if (stripe_index >= last_stripe &&
5814                             stripe_index <= (last_stripe +
5815                                              sub_stripes - 1))
5816                                 bbio->stripes[i].length -=
5817                                         stripe_end_offset;
5818
5819                         if (i == sub_stripes - 1)
5820                                 stripe_offset = 0;
5821                 } else {
5822                         bbio->stripes[i].length = length;
5823                 }
5824
5825                 stripe_index++;
5826                 if (stripe_index == map->num_stripes) {
5827                         stripe_index = 0;
5828                         stripe_nr++;
5829                 }
5830         }
5831
5832         *bbio_ret = bbio;
5833         bbio->map_type = map->type;
5834         bbio->num_stripes = num_stripes;
5835 out:
5836         free_extent_map(em);
5837         return ret;
5838 }
5839
5840 /*
5841  * In dev-replace case, for repair case (that's the only case where the mirror
5842  * is selected explicitly when calling btrfs_map_block), blocks left of the
5843  * left cursor can also be read from the target drive.
5844  *
5845  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5846  * array of stripes.
5847  * For READ, it also needs to be supported using the same mirror number.
5848  *
5849  * If the requested block is not left of the left cursor, EIO is returned. This
5850  * can happen because btrfs_num_copies() returns one more in the dev-replace
5851  * case.
5852  */
5853 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5854                                          u64 logical, u64 length,
5855                                          u64 srcdev_devid, int *mirror_num,
5856                                          u64 *physical)
5857 {
5858         struct btrfs_bio *bbio = NULL;
5859         int num_stripes;
5860         int index_srcdev = 0;
5861         int found = 0;
5862         u64 physical_of_found = 0;
5863         int i;
5864         int ret = 0;
5865
5866         ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5867                                 logical, &length, &bbio, 0, 0);
5868         if (ret) {
5869                 ASSERT(bbio == NULL);
5870                 return ret;
5871         }
5872
5873         num_stripes = bbio->num_stripes;
5874         if (*mirror_num > num_stripes) {
5875                 /*
5876                  * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5877                  * that means that the requested area is not left of the left
5878                  * cursor
5879                  */
5880                 btrfs_put_bbio(bbio);
5881                 return -EIO;
5882         }
5883
5884         /*
5885          * process the rest of the function using the mirror_num of the source
5886          * drive. Therefore look it up first.  At the end, patch the device
5887          * pointer to the one of the target drive.
5888          */
5889         for (i = 0; i < num_stripes; i++) {
5890                 if (bbio->stripes[i].dev->devid != srcdev_devid)
5891                         continue;
5892
5893                 /*
5894                  * In case of DUP, in order to keep it simple, only add the
5895                  * mirror with the lowest physical address
5896                  */
5897                 if (found &&
5898                     physical_of_found <= bbio->stripes[i].physical)
5899                         continue;
5900
5901                 index_srcdev = i;
5902                 found = 1;
5903                 physical_of_found = bbio->stripes[i].physical;
5904         }
5905
5906         btrfs_put_bbio(bbio);
5907
5908         ASSERT(found);
5909         if (!found)
5910                 return -EIO;
5911
5912         *mirror_num = index_srcdev + 1;
5913         *physical = physical_of_found;
5914         return ret;
5915 }
5916
5917 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5918                                       struct btrfs_bio **bbio_ret,
5919                                       struct btrfs_dev_replace *dev_replace,
5920                                       int *num_stripes_ret, int *max_errors_ret)
5921 {
5922         struct btrfs_bio *bbio = *bbio_ret;
5923         u64 srcdev_devid = dev_replace->srcdev->devid;
5924         int tgtdev_indexes = 0;
5925         int num_stripes = *num_stripes_ret;
5926         int max_errors = *max_errors_ret;
5927         int i;
5928
5929         if (op == BTRFS_MAP_WRITE) {
5930                 int index_where_to_add;
5931
5932                 /*
5933                  * duplicate the write operations while the dev replace
5934                  * procedure is running. Since the copying of the old disk to
5935                  * the new disk takes place at run time while the filesystem is
5936                  * mounted writable, the regular write operations to the old
5937                  * disk have to be duplicated to go to the new disk as well.
5938                  *
5939                  * Note that device->missing is handled by the caller, and that
5940                  * the write to the old disk is already set up in the stripes
5941                  * array.
5942                  */
5943                 index_where_to_add = num_stripes;
5944                 for (i = 0; i < num_stripes; i++) {
5945                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5946                                 /* write to new disk, too */
5947                                 struct btrfs_bio_stripe *new =
5948                                         bbio->stripes + index_where_to_add;
5949                                 struct btrfs_bio_stripe *old =
5950                                         bbio->stripes + i;
5951
5952                                 new->physical = old->physical;
5953                                 new->length = old->length;
5954                                 new->dev = dev_replace->tgtdev;
5955                                 bbio->tgtdev_map[i] = index_where_to_add;
5956                                 index_where_to_add++;
5957                                 max_errors++;
5958                                 tgtdev_indexes++;
5959                         }
5960                 }
5961                 num_stripes = index_where_to_add;
5962         } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5963                 int index_srcdev = 0;
5964                 int found = 0;
5965                 u64 physical_of_found = 0;
5966
5967                 /*
5968                  * During the dev-replace procedure, the target drive can also
5969                  * be used to read data in case it is needed to repair a corrupt
5970                  * block elsewhere. This is possible if the requested area is
5971                  * left of the left cursor. In this area, the target drive is a
5972                  * full copy of the source drive.
5973                  */
5974                 for (i = 0; i < num_stripes; i++) {
5975                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5976                                 /*
5977                                  * In case of DUP, in order to keep it simple,
5978                                  * only add the mirror with the lowest physical
5979                                  * address
5980                                  */
5981                                 if (found &&
5982                                     physical_of_found <=
5983                                      bbio->stripes[i].physical)
5984                                         continue;
5985                                 index_srcdev = i;
5986                                 found = 1;
5987                                 physical_of_found = bbio->stripes[i].physical;
5988                         }
5989                 }
5990                 if (found) {
5991                         struct btrfs_bio_stripe *tgtdev_stripe =
5992                                 bbio->stripes + num_stripes;
5993
5994                         tgtdev_stripe->physical = physical_of_found;
5995                         tgtdev_stripe->length =
5996                                 bbio->stripes[index_srcdev].length;
5997                         tgtdev_stripe->dev = dev_replace->tgtdev;
5998                         bbio->tgtdev_map[index_srcdev] = num_stripes;
5999
6000                         tgtdev_indexes++;
6001                         num_stripes++;
6002                 }
6003         }
6004
6005         *num_stripes_ret = num_stripes;
6006         *max_errors_ret = max_errors;
6007         bbio->num_tgtdevs = tgtdev_indexes;
6008         *bbio_ret = bbio;
6009 }
6010
6011 static bool need_full_stripe(enum btrfs_map_op op)
6012 {
6013         return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6014 }
6015
6016 /*
6017  * btrfs_get_io_geometry - calculates the geomery of a particular (address, len)
6018  *                     tuple. This information is used to calculate how big a
6019  *                     particular bio can get before it straddles a stripe.
6020  *
6021  * @fs_info - the filesystem
6022  * @logical - address that we want to figure out the geometry of
6023  * @len     - the length of IO we are going to perform, starting at @logical
6024  * @op      - type of operation - write or read
6025  * @io_geom - pointer used to return values
6026  *
6027  * Returns < 0 in case a chunk for the given logical address cannot be found,
6028  * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
6029  */
6030 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6031                         u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
6032 {
6033         struct extent_map *em;
6034         struct map_lookup *map;
6035         u64 offset;
6036         u64 stripe_offset;
6037         u64 stripe_nr;
6038         u64 stripe_len;
6039         u64 raid56_full_stripe_start = (u64)-1;
6040         int data_stripes;
6041         int ret = 0;
6042
6043         ASSERT(op != BTRFS_MAP_DISCARD);
6044
6045         em = btrfs_get_chunk_map(fs_info, logical, len);
6046         if (IS_ERR(em))
6047                 return PTR_ERR(em);
6048
6049         map = em->map_lookup;
6050         /* Offset of this logical address in the chunk */
6051         offset = logical - em->start;
6052         /* Len of a stripe in a chunk */
6053         stripe_len = map->stripe_len;
6054         /* Stripe wher this block falls in */
6055         stripe_nr = div64_u64(offset, stripe_len);
6056         /* Offset of stripe in the chunk */
6057         stripe_offset = stripe_nr * stripe_len;
6058         if (offset < stripe_offset) {
6059                 btrfs_crit(fs_info,
6060 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6061                         stripe_offset, offset, em->start, logical, stripe_len);
6062                 ret = -EINVAL;
6063                 goto out;
6064         }
6065
6066         /* stripe_offset is the offset of this block in its stripe */
6067         stripe_offset = offset - stripe_offset;
6068         data_stripes = nr_data_stripes(map);
6069
6070         if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6071                 u64 max_len = stripe_len - stripe_offset;
6072
6073                 /*
6074                  * In case of raid56, we need to know the stripe aligned start
6075                  */
6076                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6077                         unsigned long full_stripe_len = stripe_len * data_stripes;
6078                         raid56_full_stripe_start = offset;
6079
6080                         /*
6081                          * Allow a write of a full stripe, but make sure we
6082                          * don't allow straddling of stripes
6083                          */
6084                         raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6085                                         full_stripe_len);
6086                         raid56_full_stripe_start *= full_stripe_len;
6087
6088                         /*
6089                          * For writes to RAID[56], allow a full stripeset across
6090                          * all disks. For other RAID types and for RAID[56]
6091                          * reads, just allow a single stripe (on a single disk).
6092                          */
6093                         if (op == BTRFS_MAP_WRITE) {
6094                                 max_len = stripe_len * data_stripes -
6095                                           (offset - raid56_full_stripe_start);
6096                         }
6097                 }
6098                 len = min_t(u64, em->len - offset, max_len);
6099         } else {
6100                 len = em->len - offset;
6101         }
6102
6103         io_geom->len = len;
6104         io_geom->offset = offset;
6105         io_geom->stripe_len = stripe_len;
6106         io_geom->stripe_nr = stripe_nr;
6107         io_geom->stripe_offset = stripe_offset;
6108         io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6109
6110 out:
6111         /* once for us */
6112         free_extent_map(em);
6113         return ret;
6114 }
6115
6116 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6117                              enum btrfs_map_op op,
6118                              u64 logical, u64 *length,
6119                              struct btrfs_bio **bbio_ret,
6120                              int mirror_num, int need_raid_map)
6121 {
6122         struct extent_map *em;
6123         struct map_lookup *map;
6124         u64 stripe_offset;
6125         u64 stripe_nr;
6126         u64 stripe_len;
6127         u32 stripe_index;
6128         int data_stripes;
6129         int i;
6130         int ret = 0;
6131         int num_stripes;
6132         int max_errors = 0;
6133         int tgtdev_indexes = 0;
6134         struct btrfs_bio *bbio = NULL;
6135         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6136         int dev_replace_is_ongoing = 0;
6137         int num_alloc_stripes;
6138         int patch_the_first_stripe_for_dev_replace = 0;
6139         u64 physical_to_patch_in_first_stripe = 0;
6140         u64 raid56_full_stripe_start = (u64)-1;
6141         struct btrfs_io_geometry geom;
6142
6143         ASSERT(bbio_ret);
6144         ASSERT(op != BTRFS_MAP_DISCARD);
6145
6146         ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
6147         if (ret < 0)
6148                 return ret;
6149
6150         em = btrfs_get_chunk_map(fs_info, logical, *length);
6151         ASSERT(!IS_ERR(em));
6152         map = em->map_lookup;
6153
6154         *length = geom.len;
6155         stripe_len = geom.stripe_len;
6156         stripe_nr = geom.stripe_nr;
6157         stripe_offset = geom.stripe_offset;
6158         raid56_full_stripe_start = geom.raid56_stripe_offset;
6159         data_stripes = nr_data_stripes(map);
6160
6161         down_read(&dev_replace->rwsem);
6162         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6163         /*
6164          * Hold the semaphore for read during the whole operation, write is
6165          * requested at commit time but must wait.
6166          */
6167         if (!dev_replace_is_ongoing)
6168                 up_read(&dev_replace->rwsem);
6169
6170         if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6171             !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6172                 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6173                                                     dev_replace->srcdev->devid,
6174                                                     &mirror_num,
6175                                             &physical_to_patch_in_first_stripe);
6176                 if (ret)
6177                         goto out;
6178                 else
6179                         patch_the_first_stripe_for_dev_replace = 1;
6180         } else if (mirror_num > map->num_stripes) {
6181                 mirror_num = 0;
6182         }
6183
6184         num_stripes = 1;
6185         stripe_index = 0;
6186         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6187                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6188                                 &stripe_index);
6189                 if (!need_full_stripe(op))
6190                         mirror_num = 1;
6191         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6192                 if (need_full_stripe(op))
6193                         num_stripes = map->num_stripes;
6194                 else if (mirror_num)
6195                         stripe_index = mirror_num - 1;
6196                 else {
6197                         stripe_index = find_live_mirror(fs_info, map, 0,
6198                                             dev_replace_is_ongoing);
6199                         mirror_num = stripe_index + 1;
6200                 }
6201
6202         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6203                 if (need_full_stripe(op)) {
6204                         num_stripes = map->num_stripes;
6205                 } else if (mirror_num) {
6206                         stripe_index = mirror_num - 1;
6207                 } else {
6208                         mirror_num = 1;
6209                 }
6210
6211         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6212                 u32 factor = map->num_stripes / map->sub_stripes;
6213
6214                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6215                 stripe_index *= map->sub_stripes;
6216
6217                 if (need_full_stripe(op))
6218                         num_stripes = map->sub_stripes;
6219                 else if (mirror_num)
6220                         stripe_index += mirror_num - 1;
6221                 else {
6222                         int old_stripe_index = stripe_index;
6223                         stripe_index = find_live_mirror(fs_info, map,
6224                                               stripe_index,
6225                                               dev_replace_is_ongoing);
6226                         mirror_num = stripe_index - old_stripe_index + 1;
6227                 }
6228
6229         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6230                 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6231                         /* push stripe_nr back to the start of the full stripe */
6232                         stripe_nr = div64_u64(raid56_full_stripe_start,
6233                                         stripe_len * data_stripes);
6234
6235                         /* RAID[56] write or recovery. Return all stripes */
6236                         num_stripes = map->num_stripes;
6237                         max_errors = nr_parity_stripes(map);
6238
6239                         *length = map->stripe_len;
6240                         stripe_index = 0;
6241                         stripe_offset = 0;
6242                 } else {
6243                         /*
6244                          * Mirror #0 or #1 means the original data block.
6245                          * Mirror #2 is RAID5 parity block.
6246                          * Mirror #3 is RAID6 Q block.
6247                          */
6248                         stripe_nr = div_u64_rem(stripe_nr,
6249                                         data_stripes, &stripe_index);
6250                         if (mirror_num > 1)
6251                                 stripe_index = data_stripes + mirror_num - 2;
6252
6253                         /* We distribute the parity blocks across stripes */
6254                         div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6255                                         &stripe_index);
6256                         if (!need_full_stripe(op) && mirror_num <= 1)
6257                                 mirror_num = 1;
6258                 }
6259         } else {
6260                 /*
6261                  * after this, stripe_nr is the number of stripes on this
6262                  * device we have to walk to find the data, and stripe_index is
6263                  * the number of our device in the stripe array
6264                  */
6265                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6266                                 &stripe_index);
6267                 mirror_num = stripe_index + 1;
6268         }
6269         if (stripe_index >= map->num_stripes) {
6270                 btrfs_crit(fs_info,
6271                            "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6272                            stripe_index, map->num_stripes);
6273                 ret = -EINVAL;
6274                 goto out;
6275         }
6276
6277         num_alloc_stripes = num_stripes;
6278         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6279                 if (op == BTRFS_MAP_WRITE)
6280                         num_alloc_stripes <<= 1;
6281                 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6282                         num_alloc_stripes++;
6283                 tgtdev_indexes = num_stripes;
6284         }
6285
6286         bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6287         if (!bbio) {
6288                 ret = -ENOMEM;
6289                 goto out;
6290         }
6291
6292         for (i = 0; i < num_stripes; i++) {
6293                 bbio->stripes[i].physical = map->stripes[stripe_index].physical +
6294                         stripe_offset + stripe_nr * map->stripe_len;
6295                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6296                 stripe_index++;
6297         }
6298
6299         /* build raid_map */
6300         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6301             (need_full_stripe(op) || mirror_num > 1)) {
6302                 u64 tmp;
6303                 unsigned rot;
6304
6305                 /* Work out the disk rotation on this stripe-set */
6306                 div_u64_rem(stripe_nr, num_stripes, &rot);
6307
6308                 /* Fill in the logical address of each stripe */
6309                 tmp = stripe_nr * data_stripes;
6310                 for (i = 0; i < data_stripes; i++)
6311                         bbio->raid_map[(i+rot) % num_stripes] =
6312                                 em->start + (tmp + i) * map->stripe_len;
6313
6314                 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
6315                 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6316                         bbio->raid_map[(i+rot+1) % num_stripes] =
6317                                 RAID6_Q_STRIPE;
6318
6319                 sort_parity_stripes(bbio, num_stripes);
6320         }
6321
6322         if (need_full_stripe(op))
6323                 max_errors = btrfs_chunk_max_errors(map);
6324
6325         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6326             need_full_stripe(op)) {
6327                 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
6328                                           &max_errors);
6329         }
6330
6331         *bbio_ret = bbio;
6332         bbio->map_type = map->type;
6333         bbio->num_stripes = num_stripes;
6334         bbio->max_errors = max_errors;
6335         bbio->mirror_num = mirror_num;
6336
6337         /*
6338          * this is the case that REQ_READ && dev_replace_is_ongoing &&
6339          * mirror_num == num_stripes + 1 && dev_replace target drive is
6340          * available as a mirror
6341          */
6342         if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6343                 WARN_ON(num_stripes > 1);
6344                 bbio->stripes[0].dev = dev_replace->tgtdev;
6345                 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6346                 bbio->mirror_num = map->num_stripes + 1;
6347         }
6348 out:
6349         if (dev_replace_is_ongoing) {
6350                 lockdep_assert_held(&dev_replace->rwsem);
6351                 /* Unlock and let waiting writers proceed */
6352                 up_read(&dev_replace->rwsem);
6353         }
6354         free_extent_map(em);
6355         return ret;
6356 }
6357
6358 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6359                       u64 logical, u64 *length,
6360                       struct btrfs_bio **bbio_ret, int mirror_num)
6361 {
6362         if (op == BTRFS_MAP_DISCARD)
6363                 return __btrfs_map_block_for_discard(fs_info, logical,
6364                                                      length, bbio_ret);
6365
6366         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6367                                  mirror_num, 0);
6368 }
6369
6370 /* For Scrub/replace */
6371 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6372                      u64 logical, u64 *length,
6373                      struct btrfs_bio **bbio_ret)
6374 {
6375         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6376 }
6377
6378 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6379 {
6380         bio->bi_private = bbio->private;
6381         bio->bi_end_io = bbio->end_io;
6382         bio_endio(bio);
6383
6384         btrfs_put_bbio(bbio);
6385 }
6386
6387 static void btrfs_end_bio(struct bio *bio)
6388 {
6389         struct btrfs_bio *bbio = bio->bi_private;
6390         int is_orig_bio = 0;
6391
6392         if (bio->bi_status) {
6393                 atomic_inc(&bbio->error);
6394                 if (bio->bi_status == BLK_STS_IOERR ||
6395                     bio->bi_status == BLK_STS_TARGET) {
6396                         struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6397
6398                         ASSERT(dev->bdev);
6399                         if (bio_op(bio) == REQ_OP_WRITE)
6400                                 btrfs_dev_stat_inc_and_print(dev,
6401                                                 BTRFS_DEV_STAT_WRITE_ERRS);
6402                         else if (!(bio->bi_opf & REQ_RAHEAD))
6403                                 btrfs_dev_stat_inc_and_print(dev,
6404                                                 BTRFS_DEV_STAT_READ_ERRS);
6405                         if (bio->bi_opf & REQ_PREFLUSH)
6406                                 btrfs_dev_stat_inc_and_print(dev,
6407                                                 BTRFS_DEV_STAT_FLUSH_ERRS);
6408                 }
6409         }
6410
6411         if (bio == bbio->orig_bio)
6412                 is_orig_bio = 1;
6413
6414         btrfs_bio_counter_dec(bbio->fs_info);
6415
6416         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6417                 if (!is_orig_bio) {
6418                         bio_put(bio);
6419                         bio = bbio->orig_bio;
6420                 }
6421
6422                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6423                 /* only send an error to the higher layers if it is
6424                  * beyond the tolerance of the btrfs bio
6425                  */
6426                 if (atomic_read(&bbio->error) > bbio->max_errors) {
6427                         bio->bi_status = BLK_STS_IOERR;
6428                 } else {
6429                         /*
6430                          * this bio is actually up to date, we didn't
6431                          * go over the max number of errors
6432                          */
6433                         bio->bi_status = BLK_STS_OK;
6434                 }
6435
6436                 btrfs_end_bbio(bbio, bio);
6437         } else if (!is_orig_bio) {
6438                 bio_put(bio);
6439         }
6440 }
6441
6442 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6443                               u64 physical, struct btrfs_device *dev)
6444 {
6445         struct btrfs_fs_info *fs_info = bbio->fs_info;
6446
6447         bio->bi_private = bbio;
6448         btrfs_io_bio(bio)->device = dev;
6449         bio->bi_end_io = btrfs_end_bio;
6450         bio->bi_iter.bi_sector = physical >> 9;
6451         btrfs_debug_in_rcu(fs_info,
6452         "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6453                 bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6454                 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6455                 dev->devid, bio->bi_iter.bi_size);
6456         bio_set_dev(bio, dev->bdev);
6457
6458         btrfs_bio_counter_inc_noblocked(fs_info);
6459
6460         btrfsic_submit_bio(bio);
6461 }
6462
6463 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6464 {
6465         atomic_inc(&bbio->error);
6466         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6467                 /* Should be the original bio. */
6468                 WARN_ON(bio != bbio->orig_bio);
6469
6470                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6471                 bio->bi_iter.bi_sector = logical >> 9;
6472                 if (atomic_read(&bbio->error) > bbio->max_errors)
6473                         bio->bi_status = BLK_STS_IOERR;
6474                 else
6475                         bio->bi_status = BLK_STS_OK;
6476                 btrfs_end_bbio(bbio, bio);
6477         }
6478 }
6479
6480 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6481                            int mirror_num)
6482 {
6483         struct btrfs_device *dev;
6484         struct bio *first_bio = bio;
6485         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6486         u64 length = 0;
6487         u64 map_length;
6488         int ret;
6489         int dev_nr;
6490         int total_devs;
6491         struct btrfs_bio *bbio = NULL;
6492
6493         length = bio->bi_iter.bi_size;
6494         map_length = length;
6495
6496         btrfs_bio_counter_inc_blocked(fs_info);
6497         ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6498                                 &map_length, &bbio, mirror_num, 1);
6499         if (ret) {
6500                 btrfs_bio_counter_dec(fs_info);
6501                 return errno_to_blk_status(ret);
6502         }
6503
6504         total_devs = bbio->num_stripes;
6505         bbio->orig_bio = first_bio;
6506         bbio->private = first_bio->bi_private;
6507         bbio->end_io = first_bio->bi_end_io;
6508         bbio->fs_info = fs_info;
6509         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6510
6511         if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6512             ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6513                 /* In this case, map_length has been set to the length of
6514                    a single stripe; not the whole write */
6515                 if (bio_op(bio) == REQ_OP_WRITE) {
6516                         ret = raid56_parity_write(fs_info, bio, bbio,
6517                                                   map_length);
6518                 } else {
6519                         ret = raid56_parity_recover(fs_info, bio, bbio,
6520                                                     map_length, mirror_num, 1);
6521                 }
6522
6523                 btrfs_bio_counter_dec(fs_info);
6524                 return errno_to_blk_status(ret);
6525         }
6526
6527         if (map_length < length) {
6528                 btrfs_crit(fs_info,
6529                            "mapping failed logical %llu bio len %llu len %llu",
6530                            logical, length, map_length);
6531                 BUG();
6532         }
6533
6534         for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6535                 dev = bbio->stripes[dev_nr].dev;
6536                 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6537                                                    &dev->dev_state) ||
6538                     (bio_op(first_bio) == REQ_OP_WRITE &&
6539                     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6540                         bbio_error(bbio, first_bio, logical);
6541                         continue;
6542                 }
6543
6544                 if (dev_nr < total_devs - 1)
6545                         bio = btrfs_bio_clone(first_bio);
6546                 else
6547                         bio = first_bio;
6548
6549                 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6550         }
6551         btrfs_bio_counter_dec(fs_info);
6552         return BLK_STS_OK;
6553 }
6554
6555 /*
6556  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6557  * return NULL.
6558  *
6559  * If devid and uuid are both specified, the match must be exact, otherwise
6560  * only devid is used.
6561  *
6562  * If @seed is true, traverse through the seed devices.
6563  */
6564 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6565                                        u64 devid, u8 *uuid, u8 *fsid,
6566                                        bool seed)
6567 {
6568         struct btrfs_device *device;
6569         struct btrfs_fs_devices *seed_devs;
6570
6571         if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6572                 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6573                         if (device->devid == devid &&
6574                             (!uuid || memcmp(device->uuid, uuid,
6575                                              BTRFS_UUID_SIZE) == 0))
6576                                 return device;
6577                 }
6578         }
6579
6580         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6581                 if (!fsid ||
6582                     !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6583                         list_for_each_entry(device, &seed_devs->devices,
6584                                             dev_list) {
6585                                 if (device->devid == devid &&
6586                                     (!uuid || memcmp(device->uuid, uuid,
6587                                                      BTRFS_UUID_SIZE) == 0))
6588                                         return device;
6589                         }
6590                 }
6591         }
6592
6593         return NULL;
6594 }
6595
6596 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6597                                             u64 devid, u8 *dev_uuid)
6598 {
6599         struct btrfs_device *device;
6600         unsigned int nofs_flag;
6601
6602         /*
6603          * We call this under the chunk_mutex, so we want to use NOFS for this
6604          * allocation, however we don't want to change btrfs_alloc_device() to
6605          * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6606          * places.
6607          */
6608         nofs_flag = memalloc_nofs_save();
6609         device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6610         memalloc_nofs_restore(nofs_flag);
6611         if (IS_ERR(device))
6612                 return device;
6613
6614         list_add(&device->dev_list, &fs_devices->devices);
6615         device->fs_devices = fs_devices;
6616         fs_devices->num_devices++;
6617
6618         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6619         fs_devices->missing_devices++;
6620
6621         return device;
6622 }
6623
6624 /**
6625  * btrfs_alloc_device - allocate struct btrfs_device
6626  * @fs_info:    used only for generating a new devid, can be NULL if
6627  *              devid is provided (i.e. @devid != NULL).
6628  * @devid:      a pointer to devid for this device.  If NULL a new devid
6629  *              is generated.
6630  * @uuid:       a pointer to UUID for this device.  If NULL a new UUID
6631  *              is generated.
6632  *
6633  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6634  * on error.  Returned struct is not linked onto any lists and must be
6635  * destroyed with btrfs_free_device.
6636  */
6637 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6638                                         const u64 *devid,
6639                                         const u8 *uuid)
6640 {
6641         struct btrfs_device *dev;
6642         u64 tmp;
6643
6644         if (WARN_ON(!devid && !fs_info))
6645                 return ERR_PTR(-EINVAL);
6646
6647         dev = __alloc_device(fs_info);
6648         if (IS_ERR(dev))
6649                 return dev;
6650
6651         if (devid)
6652                 tmp = *devid;
6653         else {
6654                 int ret;
6655
6656                 ret = find_next_devid(fs_info, &tmp);
6657                 if (ret) {
6658                         btrfs_free_device(dev);
6659                         return ERR_PTR(ret);
6660                 }
6661         }
6662         dev->devid = tmp;
6663
6664         if (uuid)
6665                 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6666         else
6667                 generate_random_uuid(dev->uuid);
6668
6669         return dev;
6670 }
6671
6672 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6673                                         u64 devid, u8 *uuid, bool error)
6674 {
6675         if (error)
6676                 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6677                               devid, uuid);
6678         else
6679                 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6680                               devid, uuid);
6681 }
6682
6683 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6684 {
6685         int index = btrfs_bg_flags_to_raid_index(type);
6686         int ncopies = btrfs_raid_array[index].ncopies;
6687         const int nparity = btrfs_raid_array[index].nparity;
6688         int data_stripes;
6689
6690         if (nparity)
6691                 data_stripes = num_stripes - nparity;
6692         else
6693                 data_stripes = num_stripes / ncopies;
6694
6695         return div_u64(chunk_len, data_stripes);
6696 }
6697
6698 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6699                           struct btrfs_chunk *chunk)
6700 {
6701         struct btrfs_fs_info *fs_info = leaf->fs_info;
6702         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6703         struct map_lookup *map;
6704         struct extent_map *em;
6705         u64 logical;
6706         u64 length;
6707         u64 devid;
6708         u8 uuid[BTRFS_UUID_SIZE];
6709         int num_stripes;
6710         int ret;
6711         int i;
6712
6713         logical = key->offset;
6714         length = btrfs_chunk_length(leaf, chunk);
6715         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6716
6717         /*
6718          * Only need to verify chunk item if we're reading from sys chunk array,
6719          * as chunk item in tree block is already verified by tree-checker.
6720          */
6721         if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6722                 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6723                 if (ret)
6724                         return ret;
6725         }
6726
6727         read_lock(&map_tree->lock);
6728         em = lookup_extent_mapping(map_tree, logical, 1);
6729         read_unlock(&map_tree->lock);
6730
6731         /* already mapped? */
6732         if (em && em->start <= logical && em->start + em->len > logical) {
6733                 free_extent_map(em);
6734                 return 0;
6735         } else if (em) {
6736                 free_extent_map(em);
6737         }
6738
6739         em = alloc_extent_map();
6740         if (!em)
6741                 return -ENOMEM;
6742         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6743         if (!map) {
6744                 free_extent_map(em);
6745                 return -ENOMEM;
6746         }
6747
6748         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6749         em->map_lookup = map;
6750         em->start = logical;
6751         em->len = length;
6752         em->orig_start = 0;
6753         em->block_start = 0;
6754         em->block_len = em->len;
6755
6756         map->num_stripes = num_stripes;
6757         map->io_width = btrfs_chunk_io_width(leaf, chunk);
6758         map->io_align = btrfs_chunk_io_align(leaf, chunk);
6759         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6760         map->type = btrfs_chunk_type(leaf, chunk);
6761         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6762         map->verified_stripes = 0;
6763         em->orig_block_len = calc_stripe_length(map->type, em->len,
6764                                                 map->num_stripes);
6765         for (i = 0; i < num_stripes; i++) {
6766                 map->stripes[i].physical =
6767                         btrfs_stripe_offset_nr(leaf, chunk, i);
6768                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6769                 read_extent_buffer(leaf, uuid, (unsigned long)
6770                                    btrfs_stripe_dev_uuid_nr(chunk, i),
6771                                    BTRFS_UUID_SIZE);
6772                 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6773                                                         devid, uuid, NULL, true);
6774                 if (!map->stripes[i].dev &&
6775                     !btrfs_test_opt(fs_info, DEGRADED)) {
6776                         free_extent_map(em);
6777                         btrfs_report_missing_device(fs_info, devid, uuid, true);
6778                         return -ENOENT;
6779                 }
6780                 if (!map->stripes[i].dev) {
6781                         map->stripes[i].dev =
6782                                 add_missing_dev(fs_info->fs_devices, devid,
6783                                                 uuid);
6784                         if (IS_ERR(map->stripes[i].dev)) {
6785                                 free_extent_map(em);
6786                                 btrfs_err(fs_info,
6787                                         "failed to init missing dev %llu: %ld",
6788                                         devid, PTR_ERR(map->stripes[i].dev));
6789                                 return PTR_ERR(map->stripes[i].dev);
6790                         }
6791                         btrfs_report_missing_device(fs_info, devid, uuid, false);
6792                 }
6793                 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6794                                 &(map->stripes[i].dev->dev_state));
6795
6796         }
6797
6798         write_lock(&map_tree->lock);
6799         ret = add_extent_mapping(map_tree, em, 0);
6800         write_unlock(&map_tree->lock);
6801         if (ret < 0) {
6802                 btrfs_err(fs_info,
6803                           "failed to add chunk map, start=%llu len=%llu: %d",
6804                           em->start, em->len, ret);
6805         }
6806         free_extent_map(em);
6807
6808         return ret;
6809 }
6810
6811 static void fill_device_from_item(struct extent_buffer *leaf,
6812                                  struct btrfs_dev_item *dev_item,
6813                                  struct btrfs_device *device)
6814 {
6815         unsigned long ptr;
6816
6817         device->devid = btrfs_device_id(leaf, dev_item);
6818         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6819         device->total_bytes = device->disk_total_bytes;
6820         device->commit_total_bytes = device->disk_total_bytes;
6821         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6822         device->commit_bytes_used = device->bytes_used;
6823         device->type = btrfs_device_type(leaf, dev_item);
6824         device->io_align = btrfs_device_io_align(leaf, dev_item);
6825         device->io_width = btrfs_device_io_width(leaf, dev_item);
6826         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6827         WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6828         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6829
6830         ptr = btrfs_device_uuid(dev_item);
6831         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6832 }
6833
6834 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6835                                                   u8 *fsid)
6836 {
6837         struct btrfs_fs_devices *fs_devices;
6838         int ret;
6839
6840         lockdep_assert_held(&uuid_mutex);
6841         ASSERT(fsid);
6842
6843         /* This will match only for multi-device seed fs */
6844         list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
6845                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6846                         return fs_devices;
6847
6848
6849         fs_devices = find_fsid(fsid, NULL);
6850         if (!fs_devices) {
6851                 if (!btrfs_test_opt(fs_info, DEGRADED))
6852                         return ERR_PTR(-ENOENT);
6853
6854                 fs_devices = alloc_fs_devices(fsid, NULL);
6855                 if (IS_ERR(fs_devices))
6856                         return fs_devices;
6857
6858                 fs_devices->seeding = true;
6859                 fs_devices->opened = 1;
6860                 return fs_devices;
6861         }
6862
6863         /*
6864          * Upon first call for a seed fs fsid, just create a private copy of the
6865          * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
6866          */
6867         fs_devices = clone_fs_devices(fs_devices);
6868         if (IS_ERR(fs_devices))
6869                 return fs_devices;
6870
6871         ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6872         if (ret) {
6873                 free_fs_devices(fs_devices);
6874                 return ERR_PTR(ret);
6875         }
6876
6877         if (!fs_devices->seeding) {
6878                 close_fs_devices(fs_devices);
6879                 free_fs_devices(fs_devices);
6880                 return ERR_PTR(-EINVAL);
6881         }
6882
6883         list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
6884
6885         return fs_devices;
6886 }
6887
6888 static int read_one_dev(struct extent_buffer *leaf,
6889                         struct btrfs_dev_item *dev_item)
6890 {
6891         struct btrfs_fs_info *fs_info = leaf->fs_info;
6892         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6893         struct btrfs_device *device;
6894         u64 devid;
6895         int ret;
6896         u8 fs_uuid[BTRFS_FSID_SIZE];
6897         u8 dev_uuid[BTRFS_UUID_SIZE];
6898
6899         devid = btrfs_device_id(leaf, dev_item);
6900         read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6901                            BTRFS_UUID_SIZE);
6902         read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6903                            BTRFS_FSID_SIZE);
6904
6905         if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
6906                 fs_devices = open_seed_devices(fs_info, fs_uuid);
6907                 if (IS_ERR(fs_devices))
6908                         return PTR_ERR(fs_devices);
6909         }
6910
6911         device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6912                                    fs_uuid, true);
6913         if (!device) {
6914                 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6915                         btrfs_report_missing_device(fs_info, devid,
6916                                                         dev_uuid, true);
6917                         return -ENOENT;
6918                 }
6919
6920                 device = add_missing_dev(fs_devices, devid, dev_uuid);
6921                 if (IS_ERR(device)) {
6922                         btrfs_err(fs_info,
6923                                 "failed to add missing dev %llu: %ld",
6924                                 devid, PTR_ERR(device));
6925                         return PTR_ERR(device);
6926                 }
6927                 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6928         } else {
6929                 if (!device->bdev) {
6930                         if (!btrfs_test_opt(fs_info, DEGRADED)) {
6931                                 btrfs_report_missing_device(fs_info,
6932                                                 devid, dev_uuid, true);
6933                                 return -ENOENT;
6934                         }
6935                         btrfs_report_missing_device(fs_info, devid,
6936                                                         dev_uuid, false);
6937                 }
6938
6939                 if (!device->bdev &&
6940                     !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6941                         /*
6942                          * this happens when a device that was properly setup
6943                          * in the device info lists suddenly goes bad.
6944                          * device->bdev is NULL, and so we have to set
6945                          * device->missing to one here
6946                          */
6947                         device->fs_devices->missing_devices++;
6948                         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6949                 }
6950
6951                 /* Move the device to its own fs_devices */
6952                 if (device->fs_devices != fs_devices) {
6953                         ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6954                                                         &device->dev_state));
6955
6956                         list_move(&device->dev_list, &fs_devices->devices);
6957                         device->fs_devices->num_devices--;
6958                         fs_devices->num_devices++;
6959
6960                         device->fs_devices->missing_devices--;
6961                         fs_devices->missing_devices++;
6962
6963                         device->fs_devices = fs_devices;
6964                 }
6965         }
6966
6967         if (device->fs_devices != fs_info->fs_devices) {
6968                 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6969                 if (device->generation !=
6970                     btrfs_device_generation(leaf, dev_item))
6971                         return -EINVAL;
6972         }
6973
6974         fill_device_from_item(leaf, dev_item, device);
6975         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6976         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6977            !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6978                 device->fs_devices->total_rw_bytes += device->total_bytes;
6979                 atomic64_add(device->total_bytes - device->bytes_used,
6980                                 &fs_info->free_chunk_space);
6981         }
6982         ret = 0;
6983         return ret;
6984 }
6985
6986 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6987 {
6988         struct btrfs_root *root = fs_info->tree_root;
6989         struct btrfs_super_block *super_copy = fs_info->super_copy;
6990         struct extent_buffer *sb;
6991         struct btrfs_disk_key *disk_key;
6992         struct btrfs_chunk *chunk;
6993         u8 *array_ptr;
6994         unsigned long sb_array_offset;
6995         int ret = 0;
6996         u32 num_stripes;
6997         u32 array_size;
6998         u32 len = 0;
6999         u32 cur_offset;
7000         u64 type;
7001         struct btrfs_key key;
7002
7003         ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7004         /*
7005          * This will create extent buffer of nodesize, superblock size is
7006          * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7007          * overallocate but we can keep it as-is, only the first page is used.
7008          */
7009         sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
7010         if (IS_ERR(sb))
7011                 return PTR_ERR(sb);
7012         set_extent_buffer_uptodate(sb);
7013         btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
7014         /*
7015          * The sb extent buffer is artificial and just used to read the system array.
7016          * set_extent_buffer_uptodate() call does not properly mark all it's
7017          * pages up-to-date when the page is larger: extent does not cover the
7018          * whole page and consequently check_page_uptodate does not find all
7019          * the page's extents up-to-date (the hole beyond sb),
7020          * write_extent_buffer then triggers a WARN_ON.
7021          *
7022          * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
7023          * but sb spans only this function. Add an explicit SetPageUptodate call
7024          * to silence the warning eg. on PowerPC 64.
7025          */
7026         if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7027                 SetPageUptodate(sb->pages[0]);
7028
7029         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7030         array_size = btrfs_super_sys_array_size(super_copy);
7031
7032         array_ptr = super_copy->sys_chunk_array;
7033         sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7034         cur_offset = 0;
7035
7036         while (cur_offset < array_size) {
7037                 disk_key = (struct btrfs_disk_key *)array_ptr;
7038                 len = sizeof(*disk_key);
7039                 if (cur_offset + len > array_size)
7040                         goto out_short_read;
7041
7042                 btrfs_disk_key_to_cpu(&key, disk_key);
7043
7044                 array_ptr += len;
7045                 sb_array_offset += len;
7046                 cur_offset += len;
7047
7048                 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7049                         btrfs_err(fs_info,
7050                             "unexpected item type %u in sys_array at offset %u",
7051                                   (u32)key.type, cur_offset);
7052                         ret = -EIO;
7053                         break;
7054                 }
7055
7056                 chunk = (struct btrfs_chunk *)sb_array_offset;
7057                 /*
7058                  * At least one btrfs_chunk with one stripe must be present,
7059                  * exact stripe count check comes afterwards
7060                  */
7061                 len = btrfs_chunk_item_size(1);
7062                 if (cur_offset + len > array_size)
7063                         goto out_short_read;
7064
7065                 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7066                 if (!num_stripes) {
7067                         btrfs_err(fs_info,
7068                         "invalid number of stripes %u in sys_array at offset %u",
7069                                   num_stripes, cur_offset);
7070                         ret = -EIO;
7071                         break;
7072                 }
7073
7074                 type = btrfs_chunk_type(sb, chunk);
7075                 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7076                         btrfs_err(fs_info,
7077                         "invalid chunk type %llu in sys_array at offset %u",
7078                                   type, cur_offset);
7079                         ret = -EIO;
7080                         break;
7081                 }
7082
7083                 len = btrfs_chunk_item_size(num_stripes);
7084                 if (cur_offset + len > array_size)
7085                         goto out_short_read;
7086
7087                 ret = read_one_chunk(&key, sb, chunk);
7088                 if (ret)
7089                         break;
7090
7091                 array_ptr += len;
7092                 sb_array_offset += len;
7093                 cur_offset += len;
7094         }
7095         clear_extent_buffer_uptodate(sb);
7096         free_extent_buffer_stale(sb);
7097         return ret;
7098
7099 out_short_read:
7100         btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7101                         len, cur_offset);
7102         clear_extent_buffer_uptodate(sb);
7103         free_extent_buffer_stale(sb);
7104         return -EIO;
7105 }
7106
7107 /*
7108  * Check if all chunks in the fs are OK for read-write degraded mount
7109  *
7110  * If the @failing_dev is specified, it's accounted as missing.
7111  *
7112  * Return true if all chunks meet the minimal RW mount requirements.
7113  * Return false if any chunk doesn't meet the minimal RW mount requirements.
7114  */
7115 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7116                                         struct btrfs_device *failing_dev)
7117 {
7118         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7119         struct extent_map *em;
7120         u64 next_start = 0;
7121         bool ret = true;
7122
7123         read_lock(&map_tree->lock);
7124         em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7125         read_unlock(&map_tree->lock);
7126         /* No chunk at all? Return false anyway */
7127         if (!em) {
7128                 ret = false;
7129                 goto out;
7130         }
7131         while (em) {
7132                 struct map_lookup *map;
7133                 int missing = 0;
7134                 int max_tolerated;
7135                 int i;
7136
7137                 map = em->map_lookup;
7138                 max_tolerated =
7139                         btrfs_get_num_tolerated_disk_barrier_failures(
7140                                         map->type);
7141                 for (i = 0; i < map->num_stripes; i++) {
7142                         struct btrfs_device *dev = map->stripes[i].dev;
7143
7144                         if (!dev || !dev->bdev ||
7145                             test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7146                             dev->last_flush_error)
7147                                 missing++;
7148                         else if (failing_dev && failing_dev == dev)
7149                                 missing++;
7150                 }
7151                 if (missing > max_tolerated) {
7152                         if (!failing_dev)
7153                                 btrfs_warn(fs_info,
7154         "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7155                                    em->start, missing, max_tolerated);
7156                         free_extent_map(em);
7157                         ret = false;
7158                         goto out;
7159                 }
7160                 next_start = extent_map_end(em);
7161                 free_extent_map(em);
7162
7163                 read_lock(&map_tree->lock);
7164                 em = lookup_extent_mapping(map_tree, next_start,
7165                                            (u64)(-1) - next_start);
7166                 read_unlock(&map_tree->lock);
7167         }
7168 out:
7169         return ret;
7170 }
7171
7172 static void readahead_tree_node_children(struct extent_buffer *node)
7173 {
7174         int i;
7175         const int nr_items = btrfs_header_nritems(node);
7176
7177         for (i = 0; i < nr_items; i++) {
7178                 u64 start;
7179
7180                 start = btrfs_node_blockptr(node, i);
7181                 readahead_tree_block(node->fs_info, start);
7182         }
7183 }
7184
7185 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7186 {
7187         struct btrfs_root *root = fs_info->chunk_root;
7188         struct btrfs_path *path;
7189         struct extent_buffer *leaf;
7190         struct btrfs_key key;
7191         struct btrfs_key found_key;
7192         int ret;
7193         int slot;
7194         u64 total_dev = 0;
7195         u64 last_ra_node = 0;
7196
7197         path = btrfs_alloc_path();
7198         if (!path)
7199                 return -ENOMEM;
7200
7201         /*
7202          * uuid_mutex is needed only if we are mounting a sprout FS
7203          * otherwise we don't need it.
7204          */
7205         mutex_lock(&uuid_mutex);
7206
7207         /*
7208          * It is possible for mount and umount to race in such a way that
7209          * we execute this code path, but open_fs_devices failed to clear
7210          * total_rw_bytes. We certainly want it cleared before reading the
7211          * device items, so clear it here.
7212          */
7213         fs_info->fs_devices->total_rw_bytes = 0;
7214
7215         /*
7216          * Read all device items, and then all the chunk items. All
7217          * device items are found before any chunk item (their object id
7218          * is smaller than the lowest possible object id for a chunk
7219          * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7220          */
7221         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7222         key.offset = 0;
7223         key.type = 0;
7224         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7225         if (ret < 0)
7226                 goto error;
7227         while (1) {
7228                 struct extent_buffer *node;
7229
7230                 leaf = path->nodes[0];
7231                 slot = path->slots[0];
7232                 if (slot >= btrfs_header_nritems(leaf)) {
7233                         ret = btrfs_next_leaf(root, path);
7234                         if (ret == 0)
7235                                 continue;
7236                         if (ret < 0)
7237                                 goto error;
7238                         break;
7239                 }
7240                 /*
7241                  * The nodes on level 1 are not locked but we don't need to do
7242                  * that during mount time as nothing else can access the tree
7243                  */
7244                 node = path->nodes[1];
7245                 if (node) {
7246                         if (last_ra_node != node->start) {
7247                                 readahead_tree_node_children(node);
7248                                 last_ra_node = node->start;
7249                         }
7250                 }
7251                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7252                 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7253                         struct btrfs_dev_item *dev_item;
7254                         dev_item = btrfs_item_ptr(leaf, slot,
7255                                                   struct btrfs_dev_item);
7256                         ret = read_one_dev(leaf, dev_item);
7257                         if (ret)
7258                                 goto error;
7259                         total_dev++;
7260                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7261                         struct btrfs_chunk *chunk;
7262                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7263                         mutex_lock(&fs_info->chunk_mutex);
7264                         ret = read_one_chunk(&found_key, leaf, chunk);
7265                         mutex_unlock(&fs_info->chunk_mutex);
7266                         if (ret)
7267                                 goto error;
7268                 }
7269                 path->slots[0]++;
7270         }
7271
7272         /*
7273          * After loading chunk tree, we've got all device information,
7274          * do another round of validation checks.
7275          */
7276         if (total_dev != fs_info->fs_devices->total_devices) {
7277                 btrfs_warn(fs_info,
7278 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7279                           btrfs_super_num_devices(fs_info->super_copy),
7280                           total_dev);
7281                 fs_info->fs_devices->total_devices = total_dev;
7282                 btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7283         }
7284         if (btrfs_super_total_bytes(fs_info->super_copy) <
7285             fs_info->fs_devices->total_rw_bytes) {
7286                 btrfs_err(fs_info,
7287         "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7288                           btrfs_super_total_bytes(fs_info->super_copy),
7289                           fs_info->fs_devices->total_rw_bytes);
7290                 ret = -EINVAL;
7291                 goto error;
7292         }
7293         ret = 0;
7294 error:
7295         mutex_unlock(&uuid_mutex);
7296
7297         btrfs_free_path(path);
7298         return ret;
7299 }
7300
7301 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7302 {
7303         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7304         struct btrfs_device *device;
7305
7306         fs_devices->fs_info = fs_info;
7307
7308         mutex_lock(&fs_devices->device_list_mutex);
7309         list_for_each_entry(device, &fs_devices->devices, dev_list)
7310                 device->fs_info = fs_info;
7311
7312         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7313                 list_for_each_entry(device, &seed_devs->devices, dev_list)
7314                         device->fs_info = fs_info;
7315
7316                 seed_devs->fs_info = fs_info;
7317         }
7318         mutex_unlock(&fs_devices->device_list_mutex);
7319 }
7320
7321 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7322                                  const struct btrfs_dev_stats_item *ptr,
7323                                  int index)
7324 {
7325         u64 val;
7326
7327         read_extent_buffer(eb, &val,
7328                            offsetof(struct btrfs_dev_stats_item, values) +
7329                             ((unsigned long)ptr) + (index * sizeof(u64)),
7330                            sizeof(val));
7331         return val;
7332 }
7333
7334 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7335                                       struct btrfs_dev_stats_item *ptr,
7336                                       int index, u64 val)
7337 {
7338         write_extent_buffer(eb, &val,
7339                             offsetof(struct btrfs_dev_stats_item, values) +
7340                              ((unsigned long)ptr) + (index * sizeof(u64)),
7341                             sizeof(val));
7342 }
7343
7344 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7345                                        struct btrfs_path *path)
7346 {
7347         struct btrfs_dev_stats_item *ptr;
7348         struct extent_buffer *eb;
7349         struct btrfs_key key;
7350         int item_size;
7351         int i, ret, slot;
7352
7353         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7354         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7355         key.offset = device->devid;
7356         ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7357         if (ret) {
7358                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7359                         btrfs_dev_stat_set(device, i, 0);
7360                 device->dev_stats_valid = 1;
7361                 btrfs_release_path(path);
7362                 return ret < 0 ? ret : 0;
7363         }
7364         slot = path->slots[0];
7365         eb = path->nodes[0];
7366         item_size = btrfs_item_size_nr(eb, slot);
7367
7368         ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7369
7370         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7371                 if (item_size >= (1 + i) * sizeof(__le64))
7372                         btrfs_dev_stat_set(device, i,
7373                                            btrfs_dev_stats_value(eb, ptr, i));
7374                 else
7375                         btrfs_dev_stat_set(device, i, 0);
7376         }
7377
7378         device->dev_stats_valid = 1;
7379         btrfs_dev_stat_print_on_load(device);
7380         btrfs_release_path(path);
7381
7382         return 0;
7383 }
7384
7385 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7386 {
7387         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7388         struct btrfs_device *device;
7389         struct btrfs_path *path = NULL;
7390         int ret = 0;
7391
7392         path = btrfs_alloc_path();
7393         if (!path)
7394                 return -ENOMEM;
7395
7396         mutex_lock(&fs_devices->device_list_mutex);
7397         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7398                 ret = btrfs_device_init_dev_stats(device, path);
7399                 if (ret)
7400                         goto out;
7401         }
7402         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7403                 list_for_each_entry(device, &seed_devs->devices, dev_list) {
7404                         ret = btrfs_device_init_dev_stats(device, path);
7405                         if (ret)
7406                                 goto out;
7407                 }
7408         }
7409 out:
7410         mutex_unlock(&fs_devices->device_list_mutex);
7411
7412         btrfs_free_path(path);
7413         return ret;
7414 }
7415
7416 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7417                                 struct btrfs_device *device)
7418 {
7419         struct btrfs_fs_info *fs_info = trans->fs_info;
7420         struct btrfs_root *dev_root = fs_info->dev_root;
7421         struct btrfs_path *path;
7422         struct btrfs_key key;
7423         struct extent_buffer *eb;
7424         struct btrfs_dev_stats_item *ptr;
7425         int ret;
7426         int i;
7427
7428         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7429         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7430         key.offset = device->devid;
7431
7432         path = btrfs_alloc_path();
7433         if (!path)
7434                 return -ENOMEM;
7435         ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7436         if (ret < 0) {
7437                 btrfs_warn_in_rcu(fs_info,
7438                         "error %d while searching for dev_stats item for device %s",
7439                               ret, rcu_str_deref(device->name));
7440                 goto out;
7441         }
7442
7443         if (ret == 0 &&
7444             btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7445                 /* need to delete old one and insert a new one */
7446                 ret = btrfs_del_item(trans, dev_root, path);
7447                 if (ret != 0) {
7448                         btrfs_warn_in_rcu(fs_info,
7449                                 "delete too small dev_stats item for device %s failed %d",
7450                                       rcu_str_deref(device->name), ret);
7451                         goto out;
7452                 }
7453                 ret = 1;
7454         }
7455
7456         if (ret == 1) {
7457                 /* need to insert a new item */
7458                 btrfs_release_path(path);
7459                 ret = btrfs_insert_empty_item(trans, dev_root, path,
7460                                               &key, sizeof(*ptr));
7461                 if (ret < 0) {
7462                         btrfs_warn_in_rcu(fs_info,
7463                                 "insert dev_stats item for device %s failed %d",
7464                                 rcu_str_deref(device->name), ret);
7465                         goto out;
7466                 }
7467         }
7468
7469         eb = path->nodes[0];
7470         ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7471         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7472                 btrfs_set_dev_stats_value(eb, ptr, i,
7473                                           btrfs_dev_stat_read(device, i));
7474         btrfs_mark_buffer_dirty(eb);
7475
7476 out:
7477         btrfs_free_path(path);
7478         return ret;
7479 }
7480
7481 /*
7482  * called from commit_transaction. Writes all changed device stats to disk.
7483  */
7484 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7485 {
7486         struct btrfs_fs_info *fs_info = trans->fs_info;
7487         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7488         struct btrfs_device *device;
7489         int stats_cnt;
7490         int ret = 0;
7491
7492         mutex_lock(&fs_devices->device_list_mutex);
7493         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7494                 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7495                 if (!device->dev_stats_valid || stats_cnt == 0)
7496                         continue;
7497
7498
7499                 /*
7500                  * There is a LOAD-LOAD control dependency between the value of
7501                  * dev_stats_ccnt and updating the on-disk values which requires
7502                  * reading the in-memory counters. Such control dependencies
7503                  * require explicit read memory barriers.
7504                  *
7505                  * This memory barriers pairs with smp_mb__before_atomic in
7506                  * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7507                  * barrier implied by atomic_xchg in
7508                  * btrfs_dev_stats_read_and_reset
7509                  */
7510                 smp_rmb();
7511
7512                 ret = update_dev_stat_item(trans, device);
7513                 if (!ret)
7514                         atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7515         }
7516         mutex_unlock(&fs_devices->device_list_mutex);
7517
7518         return ret;
7519 }
7520
7521 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7522 {
7523         btrfs_dev_stat_inc(dev, index);
7524         btrfs_dev_stat_print_on_error(dev);
7525 }
7526
7527 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7528 {
7529         if (!dev->dev_stats_valid)
7530                 return;
7531         btrfs_err_rl_in_rcu(dev->fs_info,
7532                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7533                            rcu_str_deref(dev->name),
7534                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7535                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7536                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7537                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7538                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7539 }
7540
7541 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7542 {
7543         int i;
7544
7545         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7546                 if (btrfs_dev_stat_read(dev, i) != 0)
7547                         break;
7548         if (i == BTRFS_DEV_STAT_VALUES_MAX)
7549                 return; /* all values == 0, suppress message */
7550
7551         btrfs_info_in_rcu(dev->fs_info,
7552                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7553                rcu_str_deref(dev->name),
7554                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7555                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7556                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7557                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7558                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7559 }
7560
7561 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7562                         struct btrfs_ioctl_get_dev_stats *stats)
7563 {
7564         struct btrfs_device *dev;
7565         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7566         int i;
7567
7568         mutex_lock(&fs_devices->device_list_mutex);
7569         dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
7570                                 true);
7571         mutex_unlock(&fs_devices->device_list_mutex);
7572
7573         if (!dev) {
7574                 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7575                 return -ENODEV;
7576         } else if (!dev->dev_stats_valid) {
7577                 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7578                 return -ENODEV;
7579         } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7580                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7581                         if (stats->nr_items > i)
7582                                 stats->values[i] =
7583                                         btrfs_dev_stat_read_and_reset(dev, i);
7584                         else
7585                                 btrfs_dev_stat_set(dev, i, 0);
7586                 }
7587                 btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7588                            current->comm, task_pid_nr(current));
7589         } else {
7590                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7591                         if (stats->nr_items > i)
7592                                 stats->values[i] = btrfs_dev_stat_read(dev, i);
7593         }
7594         if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7595                 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7596         return 0;
7597 }
7598
7599 /*
7600  * Update the size and bytes used for each device where it changed.  This is
7601  * delayed since we would otherwise get errors while writing out the
7602  * superblocks.
7603  *
7604  * Must be invoked during transaction commit.
7605  */
7606 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7607 {
7608         struct btrfs_device *curr, *next;
7609
7610         ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7611
7612         if (list_empty(&trans->dev_update_list))
7613                 return;
7614
7615         /*
7616          * We don't need the device_list_mutex here.  This list is owned by the
7617          * transaction and the transaction must complete before the device is
7618          * released.
7619          */
7620         mutex_lock(&trans->fs_info->chunk_mutex);
7621         list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7622                                  post_commit_list) {
7623                 list_del_init(&curr->post_commit_list);
7624                 curr->commit_total_bytes = curr->disk_total_bytes;
7625                 curr->commit_bytes_used = curr->bytes_used;
7626         }
7627         mutex_unlock(&trans->fs_info->chunk_mutex);
7628 }
7629
7630 /*
7631  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7632  */
7633 int btrfs_bg_type_to_factor(u64 flags)
7634 {
7635         const int index = btrfs_bg_flags_to_raid_index(flags);
7636
7637         return btrfs_raid_array[index].ncopies;
7638 }
7639
7640
7641
7642 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7643                                  u64 chunk_offset, u64 devid,
7644                                  u64 physical_offset, u64 physical_len)
7645 {
7646         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7647         struct extent_map *em;
7648         struct map_lookup *map;
7649         struct btrfs_device *dev;
7650         u64 stripe_len;
7651         bool found = false;
7652         int ret = 0;
7653         int i;
7654
7655         read_lock(&em_tree->lock);
7656         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7657         read_unlock(&em_tree->lock);
7658
7659         if (!em) {
7660                 btrfs_err(fs_info,
7661 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7662                           physical_offset, devid);
7663                 ret = -EUCLEAN;
7664                 goto out;
7665         }
7666
7667         map = em->map_lookup;
7668         stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7669         if (physical_len != stripe_len) {
7670                 btrfs_err(fs_info,
7671 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7672                           physical_offset, devid, em->start, physical_len,
7673                           stripe_len);
7674                 ret = -EUCLEAN;
7675                 goto out;
7676         }
7677
7678         for (i = 0; i < map->num_stripes; i++) {
7679                 if (map->stripes[i].dev->devid == devid &&
7680                     map->stripes[i].physical == physical_offset) {
7681                         found = true;
7682                         if (map->verified_stripes >= map->num_stripes) {
7683                                 btrfs_err(fs_info,
7684                                 "too many dev extents for chunk %llu found",
7685                                           em->start);
7686                                 ret = -EUCLEAN;
7687                                 goto out;
7688                         }
7689                         map->verified_stripes++;
7690                         break;
7691                 }
7692         }
7693         if (!found) {
7694                 btrfs_err(fs_info,
7695         "dev extent physical offset %llu devid %llu has no corresponding chunk",
7696                         physical_offset, devid);
7697                 ret = -EUCLEAN;
7698         }
7699
7700         /* Make sure no dev extent is beyond device bondary */
7701         dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
7702         if (!dev) {
7703                 btrfs_err(fs_info, "failed to find devid %llu", devid);
7704                 ret = -EUCLEAN;
7705                 goto out;
7706         }
7707
7708         /* It's possible this device is a dummy for seed device */
7709         if (dev->disk_total_bytes == 0) {
7710                 struct btrfs_fs_devices *devs;
7711
7712                 devs = list_first_entry(&fs_info->fs_devices->seed_list,
7713                                         struct btrfs_fs_devices, seed_list);
7714                 dev = btrfs_find_device(devs, devid, NULL, NULL, false);
7715                 if (!dev) {
7716                         btrfs_err(fs_info, "failed to find seed devid %llu",
7717                                   devid);
7718                         ret = -EUCLEAN;
7719                         goto out;
7720                 }
7721         }
7722
7723         if (physical_offset + physical_len > dev->disk_total_bytes) {
7724                 btrfs_err(fs_info,
7725 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7726                           devid, physical_offset, physical_len,
7727                           dev->disk_total_bytes);
7728                 ret = -EUCLEAN;
7729                 goto out;
7730         }
7731 out:
7732         free_extent_map(em);
7733         return ret;
7734 }
7735
7736 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7737 {
7738         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7739         struct extent_map *em;
7740         struct rb_node *node;
7741         int ret = 0;
7742
7743         read_lock(&em_tree->lock);
7744         for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7745                 em = rb_entry(node, struct extent_map, rb_node);
7746                 if (em->map_lookup->num_stripes !=
7747                     em->map_lookup->verified_stripes) {
7748                         btrfs_err(fs_info,
7749                         "chunk %llu has missing dev extent, have %d expect %d",
7750                                   em->start, em->map_lookup->verified_stripes,
7751                                   em->map_lookup->num_stripes);
7752                         ret = -EUCLEAN;
7753                         goto out;
7754                 }
7755         }
7756 out:
7757         read_unlock(&em_tree->lock);
7758         return ret;
7759 }
7760
7761 /*
7762  * Ensure that all dev extents are mapped to correct chunk, otherwise
7763  * later chunk allocation/free would cause unexpected behavior.
7764  *
7765  * NOTE: This will iterate through the whole device tree, which should be of
7766  * the same size level as the chunk tree.  This slightly increases mount time.
7767  */
7768 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
7769 {
7770         struct btrfs_path *path;
7771         struct btrfs_root *root = fs_info->dev_root;
7772         struct btrfs_key key;
7773         u64 prev_devid = 0;
7774         u64 prev_dev_ext_end = 0;
7775         int ret = 0;
7776
7777         key.objectid = 1;
7778         key.type = BTRFS_DEV_EXTENT_KEY;
7779         key.offset = 0;
7780
7781         path = btrfs_alloc_path();
7782         if (!path)
7783                 return -ENOMEM;
7784
7785         path->reada = READA_FORWARD;
7786         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7787         if (ret < 0)
7788                 goto out;
7789
7790         if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7791                 ret = btrfs_next_item(root, path);
7792                 if (ret < 0)
7793                         goto out;
7794                 /* No dev extents at all? Not good */
7795                 if (ret > 0) {
7796                         ret = -EUCLEAN;
7797                         goto out;
7798                 }
7799         }
7800         while (1) {
7801                 struct extent_buffer *leaf = path->nodes[0];
7802                 struct btrfs_dev_extent *dext;
7803                 int slot = path->slots[0];
7804                 u64 chunk_offset;
7805                 u64 physical_offset;
7806                 u64 physical_len;
7807                 u64 devid;
7808
7809                 btrfs_item_key_to_cpu(leaf, &key, slot);
7810                 if (key.type != BTRFS_DEV_EXTENT_KEY)
7811                         break;
7812                 devid = key.objectid;
7813                 physical_offset = key.offset;
7814
7815                 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
7816                 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
7817                 physical_len = btrfs_dev_extent_length(leaf, dext);
7818
7819                 /* Check if this dev extent overlaps with the previous one */
7820                 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
7821                         btrfs_err(fs_info,
7822 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7823                                   devid, physical_offset, prev_dev_ext_end);
7824                         ret = -EUCLEAN;
7825                         goto out;
7826                 }
7827
7828                 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
7829                                             physical_offset, physical_len);
7830                 if (ret < 0)
7831                         goto out;
7832                 prev_devid = devid;
7833                 prev_dev_ext_end = physical_offset + physical_len;
7834
7835                 ret = btrfs_next_item(root, path);
7836                 if (ret < 0)
7837                         goto out;
7838                 if (ret > 0) {
7839                         ret = 0;
7840                         break;
7841                 }
7842         }
7843
7844         /* Ensure all chunks have corresponding dev extents */
7845         ret = verify_chunk_dev_extent_mapping(fs_info);
7846 out:
7847         btrfs_free_path(path);
7848         return ret;
7849 }
7850
7851 /*
7852  * Check whether the given block group or device is pinned by any inode being
7853  * used as a swapfile.
7854  */
7855 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
7856 {
7857         struct btrfs_swapfile_pin *sp;
7858         struct rb_node *node;
7859
7860         spin_lock(&fs_info->swapfile_pins_lock);
7861         node = fs_info->swapfile_pins.rb_node;
7862         while (node) {
7863                 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
7864                 if (ptr < sp->ptr)
7865                         node = node->rb_left;
7866                 else if (ptr > sp->ptr)
7867                         node = node->rb_right;
7868                 else
7869                         break;
7870         }
7871         spin_unlock(&fs_info->swapfile_pins_lock);
7872         return node != NULL;
7873 }