/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H

#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"

#define BTRFS_MAX_DATA_CHUNK_SIZE	(10ULL * SZ_1G)

extern struct mutex uuid_mutex;

#define BTRFS_STRIPE_LEN	SZ_64K

struct btrfs_io_geometry {
	/* remaining bytes before crossing a stripe */
	u64 len;
	/* offset of logical address in chunk */
	u64 offset;
	/* length of single IO stripe */
	u64 stripe_len;
	/* number of stripe where address falls */
	u64 stripe_nr;
	/* offset of address in stripe */
	u64 stripe_offset;
	/* offset of raid56 stripe into the chunk */
	u64 raid56_stripe_offset;
};

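/*
 * Usage sketch (illustrative, not a definitive call sequence): callers
 * typically fill the geometry via btrfs_get_io_geometry() declared below
 * and use ->len to avoid building a bio that crosses a stripe boundary.
 *
 *	struct btrfs_io_geometry geom;
 *	u64 max_len;
 *
 *	if (btrfs_get_io_geometry(fs_info, btrfs_op(bio), logical,
 *				  map_length, &geom) == 0)
 *		max_len = min(map_length, geom.len);
 */
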
/*
 * Use sequence counter to get consistent device stat data on
 * 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device)	\
	seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

#define BTRFS_DEV_STATE_WRITEABLE	(0)
#define BTRFS_DEV_STATE_IN_FS_METADATA	(1)
#define BTRFS_DEV_STATE_MISSING		(2)
#define BTRFS_DEV_STATE_REPLACE_TGT	(3)
#define BTRFS_DEV_STATE_FLUSH_SENT	(4)
#define BTRFS_DEV_STATE_NO_READA	(5)

struct btrfs_device {
	struct list_head dev_list; /* device_list_mutex */
	struct list_head dev_alloc_list; /* chunk mutex */
	struct list_head post_commit_list; /* chunk mutex */
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_fs_info *fs_info;

	struct rcu_string __rcu *name;

	u64 generation;

	struct block_device *bdev;

	/* the mode sent to blkdev_get */
	fmode_t mode;

	unsigned long dev_state;
	blk_status_t last_flush_error;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
	seqcount_t data_seqcount;
#endif

	/* the internal btrfs device id */
	u64 devid;

	/* size of the device in memory */
	u64 total_bytes;

	/* size of the device on disk */
	u64 disk_total_bytes;

	/* bytes used */
	u64 bytes_used;

	/* optimal io alignment for this device */
	u32 io_align;

	/* optimal io width for this device */
	u32 io_width;
	/* type and info about this device */
	u64 type;

	/* minimal io size for this device */
	u32 sector_size;

	/* physical drive uuid (or lvm uuid) */
	u8 uuid[BTRFS_UUID_SIZE];

	/*
	 * Size of the device on the current transaction.
	 *
	 * This variant is updated when committing the transaction, and is
	 * protected by the chunk mutex.
	 */
	u64 commit_total_bytes;

	/* bytes used on the current transaction */
	u64 commit_bytes_used;

	/* for sending down flush barriers */
	struct bio *flush_bio;
	struct completion flush_wait;

	/* per-device scrub information */
	struct scrub_ctx *scrub_ctx;

	/* readahead state */
	atomic_t reada_in_flight;
	u64 reada_next;
	struct reada_zone *reada_curr_zone;
	struct radix_tree_root reada_zones;
	struct radix_tree_root reada_extents;

	/*
	 * Disk I/O failure stats. For a detailed description refer to
	 * enum btrfs_dev_stat_values in ioctl.h.
	 */
	int dev_stats_valid;

	/* Counter to record the change of device stats */
	atomic_t dev_stats_ccnt;
	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];

	struct extent_io_tree alloc_state;

	struct completion kobj_unregister;
	/* For sysfs/FSID/devinfo/devid/ */
	struct kobject devid_kobj;
};

/*
 * If we read those variants while holding their own lock, we needn't
 * use the following helpers; reading them directly is safe.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	u64 size;							\
	unsigned int seq;						\
									\
	do {								\
		seq = read_seqcount_begin(&dev->data_seqcount);		\
		size = dev->name;					\
	} while (read_seqcount_retry(&dev->data_seqcount, seq));	\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	write_seqcount_begin(&dev->data_seqcount);			\
	dev->name = size;						\
	write_seqcount_end(&dev->data_seqcount);			\
	preempt_enable();						\
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	u64 size;							\
									\
	preempt_disable();						\
	size = dev->name;						\
	preempt_enable();						\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	dev->name = size;						\
	preempt_enable();						\
}
#else
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	return dev->name;						\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	dev->name = size;						\
}
#endif

BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(bytes_used);

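/*
 * Usage sketch (illustrative): outside of the chunk mutex, 32-bit SMP
 * readers must go through the generated helpers to see a consistent
 * 64-bit value:
 *
 *	u64 total = btrfs_device_get_total_bytes(device);
 *	u64 used = btrfs_device_get_bytes_used(device);
 *	u64 unallocated = total > used ? total - used : 0;
 *
 * Writers, e.g. the grow/shrink paths, pair this with
 * btrfs_device_set_total_bytes(device, new_size).
 */
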
enum btrfs_chunk_allocation_policy {
	BTRFS_CHUNK_ALLOC_REGULAR,
};

struct btrfs_fs_devices {
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	u8 metadata_uuid[BTRFS_FSID_SIZE];
	bool fsid_change;
	struct list_head fs_list;

	u64 num_devices;
	u64 open_devices;
	u64 rw_devices;
	u64 missing_devices;
	u64 total_rw_bytes;
	u64 total_devices;

	/* Highest generation number of seen devices */
	u64 latest_generation;

	struct block_device *latest_bdev;

	/*
	 * All of the devices in the FS, protected by a mutex so we can
	 * safely walk it to write out the supers without worrying about
	 * adds/removes by the multi-device code. Scrubbing the super can
	 * kick off super writes while holding this mutex lock.
	 */
	struct mutex device_list_mutex;

	/* List of all devices, protected by device_list_mutex */
	struct list_head devices;

	/*
	 * Devices which can satisfy space allocation. Protected by the
	 * chunk mutex.
	 */
	struct list_head alloc_list;

	struct list_head seed_list;
	bool seeding;

	int opened;

	/*
	 * Set when we find or add a device that doesn't have the nonrot
	 * flag set.
	 */
	bool rotating;

	struct btrfs_fs_info *fs_info;
	/* sysfs kobjects */
	struct kobject fsid_kobj;
	struct kobject *devices_kobj;
	struct kobject *devinfo_kobj;
	struct completion kobj_unregister;

	enum btrfs_chunk_allocation_policy chunk_alloc_policy;
};

#define BTRFS_BIO_INLINE_CSUM_SIZE	64

#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

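/*
 * Worked example (a sketch assuming the packed on-disk sizes at the time
 * of writing: btrfs_disk_key 17 bytes, btrfs_chunk 48 bytes, btrfs_stripe
 * 32 bytes, BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048):
 *
 *	(2048 - 2 * 17 - 2 * 48) / 32 + 1 = 1918 / 32 + 1 = 60
 *
 * so a system chunk is capped at about 60 stripes; the "2 *" terms leave
 * headroom for one more key/chunk pair in the superblock array.
 */
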
/*
 * We need the mirror number and stripe index to be passed around
 * the call chain while we are processing end_io (especially errors).
 * Really, what we need is a btrfs_bio structure that has this info
 * and is properly sized with its stripe array, but we're not there
 * quite yet. We have our own btrfs bioset, and all of the bios
 * we allocate are actually btrfs_io_bios. We'll cram as much of
 * struct btrfs_bio as we can into this over time.
 */
struct btrfs_io_bio {
	unsigned int mirror_num;
	struct btrfs_device *device;
	u64 logical;
	u8 *csum;
	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
	struct bvec_iter iter;
	/*
	 * This member must come last, bio_alloc_bioset will allocate enough
	 * bytes for entire btrfs_io_bio but relies on bio being last.
	 */
	struct bio bio;
};

static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
{
	return container_of(bio, struct btrfs_io_bio, bio);
}

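/*
 * Usage sketch (illustrative): since every bio here comes from the btrfs
 * bioset, an end_io handler can recover the btrfs specific state from a
 * plain struct bio. example_end_io() is a hypothetical handler:
 *
 *	static void example_end_io(struct bio *bio)
 *	{
 *		struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 *
 *		pr_debug("mirror %u completed\n", io_bio->mirror_num);
 *		btrfs_io_bio_free_csum(io_bio);
 *		bio_put(bio);
 *	}
 */
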
static inline void btrfs_io_bio_free_csum(struct btrfs_io_bio *io_bio)
{
	if (io_bio->csum != io_bio->csum_inline) {
		kfree(io_bio->csum);
		io_bio->csum = NULL;
	}
}

struct btrfs_bio_stripe {
	struct btrfs_device *dev;
	u64 physical;
	u64 length; /* only used for discard mappings */
};

struct btrfs_bio {
	refcount_t refs;
	atomic_t stripes_pending;
	struct btrfs_fs_info *fs_info;
	u64 map_type; /* get from map_lookup->type */
	bio_end_io_t *end_io;
	struct bio *orig_bio;
	void *private;
	atomic_t error;
	int max_errors;
	int num_stripes;
	int mirror_num;
	int num_tgtdevs;
	int *tgtdev_map;
	/*
	 * logical block numbers for the start of each stripe
	 * The last one or two are p/q. These are sorted,
	 * so raid_map[0] is the start of our full stripe
	 */
	u64 *raid_map;
	struct btrfs_bio_stripe stripes[];
};

struct btrfs_device_info {
	struct btrfs_device *dev;
	u64 dev_offset;
	u64 max_avail;
	u64 total_avail;
};

struct btrfs_raid_attr {
	u8 sub_stripes;		/* sub_stripes info for map */
	u8 dev_stripes;		/* stripes per dev */
	u8 devs_max;		/* max devs to use */
	u8 devs_min;		/* min devs needed */
	u8 tolerated_failures;	/* max tolerated failed devs */
	u8 devs_increment;	/* ndevs has to be a multiple of this */
	u8 ncopies;		/* how many copies of the data we have */
	u8 nparity;		/* number of stripes worth of bytes to store
				 * parity information */
	u8 mindev_error;	/* error code if the min devs requisite is unmet */
	const char raid_name[8]; /* name of the raid */
	u64 bg_flag;		/* block group flag of the raid */
};

extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	u64 stripe_len;
	int num_stripes;
	int sub_stripes;
	u64 chunk_start;
	u64 chunk_len;
	int verified_stripes; /* For mount time dev extent verification */
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

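/*
 * Allocation sketch (illustrative): map_lookup ends in a flexible array,
 * so allocations must size it with map_lookup_size():
 *
 *	struct map_lookup *map;
 *
 *	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
 *	if (!map)
 *		return -ENOMEM;
 *	map->num_stripes = num_stripes;
 */
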
struct btrfs_balance_args;
struct btrfs_balance_progress;
struct btrfs_balance_control {
	struct btrfs_balance_args data;
	struct btrfs_balance_args meta;
	struct btrfs_balance_args sys;

	u64 flags;

	struct btrfs_balance_progress stat;
};

enum btrfs_map_op {
	BTRFS_MAP_READ,
	BTRFS_MAP_WRITE,
	BTRFS_MAP_DISCARD,
	BTRFS_MAP_GET_READ_MIRRORS,
};

static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		return BTRFS_MAP_DISCARD;
	case REQ_OP_WRITE:
		return BTRFS_MAP_WRITE;
	default:
		WARN_ON_ONCE(1);
		fallthrough;
	case REQ_OP_READ:
		return BTRFS_MAP_READ;
	}
}

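/*
 * Usage sketch (illustrative): btrfs_op() translates the block layer op
 * into the mapping op expected by btrfs_map_block() below:
 *
 *	struct btrfs_bio *bbio = NULL;
 *	u64 map_length = length;
 *	int ret;
 *
 *	ret = btrfs_map_block(fs_info, btrfs_op(bio), logical,
 *			      &map_length, &bbio, 0);
 */
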
void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret);
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
			  u64 logical, u64 len,
			  struct btrfs_io_geometry *io_geom);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
					   fmode_t flags, void *holder);
int btrfs_forget_devices(const char *path);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev);
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
						  u64 devid,
						  const char *devpath);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid);
void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    const char *device_path, u64 devid);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid,
				       bool seed);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs);
void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_uuid_scan_kthread(void *data);
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
			   u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length);
void btrfs_release_disk_super(struct btrfs_super_block *super);

static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
				      int index)
{
	atomic_inc(dev->dev_stat_values + index);
	/*
	 * This memory barrier orders stores updating statistics before
	 * stores updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
				      int index)
{
	return atomic_read(dev->dev_stat_values + index);
}

static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
						int index)
{
	int ret;

	ret = atomic_xchg(dev->dev_stat_values + index, 0);
	/*
	 * atomic_xchg implies a full memory barrier as per atomic_t.txt:
	 * - RMW operations that have a return value are fully ordered;
	 *
	 * This implicit memory barrier is paired with the smp_rmb in
	 * btrfs_run_dev_stats().
	 */
	atomic_inc(&dev->dev_stats_ccnt);
	return ret;
}

static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
				      int index, unsigned long val)
{
	atomic_set(dev->dev_stat_values + index, val);
	/*
	 * This memory barrier orders stores updating statistics before
	 * stores updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

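/*
 * Usage sketch (illustrative): an I/O completion error path would bump
 * the matching counter, with the indices taken from enum
 * btrfs_dev_stat_values in the uapi header:
 *
 *	if (bio->bi_status) {
 *		if (btrfs_op(bio) == BTRFS_MAP_WRITE)
 *			btrfs_dev_stat_inc_and_print(dev,
 *					BTRFS_DEV_STAT_WRITE_ERRS);
 *		else
 *			btrfs_dev_stat_inc_and_print(dev,
 *					BTRFS_DEV_STAT_READ_ERRS);
 *	}
 */
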
/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types,
 * which can be used as an index to access btrfs_raid_array[].
 */
static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
		return BTRFS_RAID_RAID1C3;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
		return BTRFS_RAID_RAID1C4;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

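/*
 * Usage sketch (illustrative): the returned index is meant for
 * btrfs_raid_array lookups, e.g. how many device losses a profile
 * tolerates:
 *
 *	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(flags);
 *	int max_lost = btrfs_raid_array[index].tolerated_failures;
 */
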
void btrfs_commit_device_sizes(struct btrfs_transaction *trans);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev);
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
			       struct block_device *bdev,
			       const char *device_path);

int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb);

#endif