2 * Copyright (C) 2011-2012 Red Hat, Inc.
4 * This file is released under the GPL.
7 #include "dm-thin-metadata.h"
8 #include "persistent-data/dm-btree.h"
9 #include "persistent-data/dm-space-map.h"
10 #include "persistent-data/dm-space-map-disk.h"
11 #include "persistent-data/dm-transaction-manager.h"
13 #include <linux/list.h>
14 #include <linux/device-mapper.h>
15 #include <linux/workqueue.h>
17 /*--------------------------------------------------------------------------
18 * As far as the metadata goes, there is:
 * - A superblock in block zero, taking up fewer than 512 bytes for
 *   ease of copying between transactions.
23 * - A space map managing the metadata blocks.
25 * - A space map managing the data blocks.
27 * - A btree mapping our internal thin dev ids onto struct disk_device_details.
 * - A hierarchical btree, with 2 levels, which effectively maps (thin
 *   dev id, virtual block) -> block_time.  Block time is a 64-bit
 *   field holding the time in the low 24 bits, and the block in the
 *   top 40 bits.
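 *   For example, time 5 and data block 1234 pack to
 *   (1234 << 24) | 5 == 0x4D2000005; see pack_block_time() below.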
 * BTrees consist solely of btree_nodes, each of which fills a block.
 * Some are internal nodes, whose values are __le64 pointers to other
 * nodes.  Leaf nodes can store data of any reasonable size (i.e. much
 * smaller than the block size).  A node consists of a header,
 * followed by an array of keys, followed by an array of values.  We
 * binary search on the keys, so they're all held together to help the
 * cpu cache.
42 * Space maps have 2 btrees:
 * - One maps a uint64_t onto a struct index_entry, which points to a
 *   bitmap block and holds some details about how many free entries
 *   there are, etc.
 *
 * - The bitmap blocks have a header (for the checksum).  The rest of
 *   the block is pairs of bits, with the following meaning:
 *
 *   0 - ref count is 0
 *   1 - ref count is 1
 *   2 - ref count is 2
 *   3 - ref count is higher than 2
 *
 * - If the count is higher than 2 then the ref count is entered in a
 *   second btree that directly maps the block_address to a uint32_t
 *   ref count.
 * The space map metadata variant doesn't have a bitmaps btree.  Instead
 * it has a single block's worth of index_entries.  This avoids
 * recursive issues with the bitmap btree needing to allocate space in
 * order to insert.  With a small data block size such as 64k the
 * metadata can support data devices that are hundreds of terabytes in
 * size.
66 * The space maps allocate space linearly from front to back. Space that
67 * is freed in a transaction is never recycled within that transaction.
 * To try to avoid fragmenting _free_ space, the allocator always goes
 * back and fills in gaps.
 *
 * All metadata I/O is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
 * from the block manager.
 *--------------------------------------------------------------------------*/
75 #define DM_MSG_PREFIX "thin metadata"
77 #define THIN_SUPERBLOCK_MAGIC 27022010
78 #define THIN_SUPERBLOCK_LOCATION 0
79 #define THIN_VERSION 2
80 #define THIN_METADATA_CACHE_SIZE 64
81 #define SECTOR_TO_BLOCK_SHIFT 3
/*
 * For btree insert:
 *  3 for btree insert +
 *  2 for btree lookup used within space map
 * For btree remove:
 *  2 for shadow spine +
 *  4 for rebalancing a 3-child node
 */
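/* i.e. THIN_MAX_CONCURRENT_LOCKS = max(3 + 2, 2 + 4) = 6. */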
91 #define THIN_MAX_CONCURRENT_LOCKS 6
93 /* This should be plenty */
94 #define SPACE_MAP_ROOT_SIZE 128
97 * Little endian on-disk superblock and device details.
99 struct thin_disk_superblock {
100 __le32 csum; /* Checksum of superblock except for this field. */
102 __le64 blocknr; /* This block number, dm_block_t. */
	/*
	 * Root held by userspace transactions.
	 */
	__le64 held_root;
116 __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
117 __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
120 * 2-level btree mapping (dev_id, (dev block, time)) -> data block
122 __le64 data_mapping_root;
125 * Device detail root mapping dev_id -> device_details
127 __le64 device_details_root;
129 __le32 data_block_size; /* In 512-byte sectors. */
131 __le32 metadata_block_size; /* In 512-byte sectors. */
132 __le64 metadata_nr_blocks;
135 __le32 compat_ro_flags;
136 __le32 incompat_flags;
139 struct disk_device_details {
140 __le64 mapped_blocks;
141 __le64 transaction_id; /* When created. */
142 __le32 creation_time;
143 __le32 snapshotted_time;
146 struct dm_pool_metadata {
147 struct hlist_node hash;
149 struct block_device *bdev;
150 struct dm_block_manager *bm;
151 struct dm_space_map *metadata_sm;
152 struct dm_space_map *data_sm;
153 struct dm_transaction_manager *tm;
154 struct dm_transaction_manager *nb_tm;
158 * First level holds thin_dev_t.
159 * Second level holds mappings.
161 struct dm_btree_info info;
164 * Non-blocking version of the above.
166 struct dm_btree_info nb_info;
169 * Just the top level for deleting whole devices.
171 struct dm_btree_info tl_info;
174 * Just the bottom level for creating new devices.
176 struct dm_btree_info bl_info;
179 * Describes the device details btree.
181 struct dm_btree_info details_info;
183 struct rw_semaphore root_lock;
	uint32_t time;
	dm_block_t root;
	dm_block_t details_root;
187 struct list_head thin_devices;
	uint64_t trans_id;
	unsigned long flags;
	sector_t data_block_size;
	/*
	 * We reserve a section of the metadata for commit overhead.
	 * All reported space does *not* include this.
	 */
	dm_block_t metadata_reserve;
	/*
	 * Set if a transaction has to be aborted but the attempt to roll back
	 * to the previous (good) transaction failed.  The only pool metadata
	 * operation possible in this state is the closing of the device.
	 */
	bool fail_io:1;
	/*
	 * Reading the space map roots can fail, so we read them into these
	 * buffers before the superblock is locked and updated.
	 */
	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};
213 struct dm_thin_device {
214 struct list_head list;
215 struct dm_pool_metadata *pmd;
	dm_thin_id id;

	int open_count;
	bool changed:1;
	bool aborted_with_changes:1;
221 uint64_t mapped_blocks;
222 uint64_t transaction_id;
223 uint32_t creation_time;
	uint32_t snapshotted_time;
};
227 /*----------------------------------------------------------------
228 * superblock validator
229 *--------------------------------------------------------------*/
231 #define SUPERBLOCK_CSUM_XOR 160774
static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
239 disk_super->blocknr = cpu_to_le64(dm_block_location(b));
240 disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
241 block_size - sizeof(__le32),
242 SUPERBLOCK_CSUM_XOR));
static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;
252 if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
253 DMERR("sb_check failed: blocknr %llu: "
254 "wanted %llu", le64_to_cpu(disk_super->blocknr),
255 (unsigned long long)dm_block_location(b));
259 if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
260 DMERR("sb_check failed: magic %llu: "
261 "wanted %llu", le64_to_cpu(disk_super->magic),
262 (unsigned long long)THIN_SUPERBLOCK_MAGIC);
266 csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
267 block_size - sizeof(__le32),
268 SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return 0;
}
static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
284 /*----------------------------------------------------------------
285 * Methods for the btree value types
286 *--------------------------------------------------------------*/
static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
	return (b << 24) | t;
}
static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
	*b = v >> 24;
	*t = v & ((1 << 24) - 1);
}
static void data_block_inc(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	dm_block_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_inc_block(sm, b);
}
static void data_block_dec(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	dm_block_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_dec_block(sm, b);
}
static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;
	dm_block_t b1, b2;
	uint32_t t;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);
	return b1 == b2;
}
static void subtree_inc(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	dm_tm_inc(info->tm, root);
}
static void subtree_dec(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	if (dm_btree_del(info, root))
		DMERR("btree delete failed");
}
static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	return v1_le == v2_le;
}
369 /*----------------------------------------------------------------*/
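/*
 * Wrappers that take a write lock on the superblock through the
 * validator.  The _zero variant zeroes the block instead of reading it,
 * and is used when formatting.
 */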
371 static int superblock_lock_zero(struct dm_pool_metadata *pmd,
372 struct dm_block **sblock)
374 return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
375 &sb_validator, sblock);
378 static int superblock_lock(struct dm_pool_metadata *pmd,
379 struct dm_block **sblock)
381 return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
382 &sb_validator, sblock);
static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
	int r;
	unsigned i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);
394 * We can't use a validator here - it may be all zeroes.
396 r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
	data_le = dm_block_data(b);
	*result = 1;
	for (i = 0; i < block_size; i++) {
		if (data_le[i] != zero) {
			*result = 0;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
}
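/*
 * Sets up the four btree_info variants: the two-level mapping tree (in
 * blocking and non-blocking flavours), its top and bottom levels as
 * separate one-level trees (used when deleting or creating whole
 * devices), and the device details tree.
 */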
414 static void __setup_btree_details(struct dm_pool_metadata *pmd)
416 pmd->info.tm = pmd->tm;
417 pmd->info.levels = 2;
418 pmd->info.value_type.context = pmd->data_sm;
419 pmd->info.value_type.size = sizeof(__le64);
420 pmd->info.value_type.inc = data_block_inc;
421 pmd->info.value_type.dec = data_block_dec;
422 pmd->info.value_type.equal = data_block_equal;
424 memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
425 pmd->nb_info.tm = pmd->nb_tm;
427 pmd->tl_info.tm = pmd->tm;
428 pmd->tl_info.levels = 1;
429 pmd->tl_info.value_type.context = &pmd->bl_info;
430 pmd->tl_info.value_type.size = sizeof(__le64);
431 pmd->tl_info.value_type.inc = subtree_inc;
432 pmd->tl_info.value_type.dec = subtree_dec;
433 pmd->tl_info.value_type.equal = subtree_equal;
435 pmd->bl_info.tm = pmd->tm;
436 pmd->bl_info.levels = 1;
437 pmd->bl_info.value_type.context = pmd->data_sm;
438 pmd->bl_info.value_type.size = sizeof(__le64);
439 pmd->bl_info.value_type.inc = data_block_inc;
440 pmd->bl_info.value_type.dec = data_block_dec;
441 pmd->bl_info.value_type.equal = data_block_equal;
443 pmd->details_info.tm = pmd->tm;
444 pmd->details_info.levels = 1;
445 pmd->details_info.value_type.context = NULL;
446 pmd->details_info.value_type.size = sizeof(struct disk_device_details);
447 pmd->details_info.value_type.inc = NULL;
448 pmd->details_info.value_type.dec = NULL;
449 pmd->details_info.value_type.equal = NULL;
static int save_sm_roots(struct dm_pool_metadata *pmd)
{
	int r;
	size_t len;

	r = dm_sm_root_size(pmd->metadata_sm, &len);
461 r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
465 r = dm_sm_root_size(pmd->data_sm, &len);
	return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
}
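/*
 * Copies the space map roots saved by save_sm_roots() into the
 * superblock, once the superblock write lock is held.
 */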
472 static void copy_sm_roots(struct dm_pool_metadata *pmd,
473 struct thin_disk_superblock *disk)
475 memcpy(&disk->metadata_space_map_root,
476 &pmd->metadata_space_map_root,
477 sizeof(pmd->metadata_space_map_root));
479 memcpy(&disk->data_space_map_root,
480 &pmd->data_space_map_root,
481 sizeof(pmd->data_space_map_root));
484 static int __write_initial_superblock(struct dm_pool_metadata *pmd)
487 struct dm_block *sblock;
488 struct thin_disk_superblock *disk_super;
489 sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
491 if (bdev_size > THIN_METADATA_MAX_SECTORS)
492 bdev_size = THIN_METADATA_MAX_SECTORS;
494 r = dm_sm_commit(pmd->data_sm);
498 r = dm_tm_pre_commit(pmd->tm);
502 r = save_sm_roots(pmd);
506 r = superblock_lock_zero(pmd, &sblock);
510 disk_super = dm_block_data(sblock);
511 disk_super->flags = 0;
512 memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
513 disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
514 disk_super->version = cpu_to_le32(THIN_VERSION);
515 disk_super->time = 0;
516 disk_super->trans_id = 0;
517 disk_super->held_root = 0;
519 copy_sm_roots(pmd, disk_super);
521 disk_super->data_mapping_root = cpu_to_le64(pmd->root);
522 disk_super->device_details_root = cpu_to_le64(pmd->details_root);
523 disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
524 disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
525 disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
527 return dm_tm_commit(pmd->tm, sblock);
530 static int __format_metadata(struct dm_pool_metadata *pmd)
534 r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
535 &pmd->tm, &pmd->metadata_sm);
537 DMERR("tm_create_with_sm failed");
541 pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
542 if (IS_ERR(pmd->data_sm)) {
543 DMERR("sm_disk_create failed");
544 r = PTR_ERR(pmd->data_sm);
548 pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
550 DMERR("could not create non-blocking clone tm");
552 goto bad_cleanup_data_sm;
555 __setup_btree_details(pmd);
557 r = dm_btree_empty(&pmd->info, &pmd->root);
559 goto bad_cleanup_nb_tm;
561 r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
563 DMERR("couldn't create devices root");
564 goto bad_cleanup_nb_tm;
567 r = __write_initial_superblock(pmd);
569 goto bad_cleanup_nb_tm;
574 dm_tm_destroy(pmd->nb_tm);
576 dm_sm_destroy(pmd->data_sm);
578 dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);

	return r;
}
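/*
 * Refuse to open metadata that uses incompat feature flags we don't
 * understand; a read-write open additionally requires that every
 * compat_ro flag is known.
 */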
584 static int __check_incompat_features(struct thin_disk_superblock *disk_super,
585 struct dm_pool_metadata *pmd)
{
	uint32_t features;

	features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
	if (features) {
		DMERR("could not access metadata due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}
597 * Check for read-only metadata to skip the following RDWR checks.
599 if (get_disk_ro(pmd->bdev->bd_disk))
602 features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
604 DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
605 (unsigned long)features);
612 static int __open_metadata(struct dm_pool_metadata *pmd)
615 struct dm_block *sblock;
616 struct thin_disk_superblock *disk_super;
618 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
619 &sb_validator, &sblock);
621 DMERR("couldn't read superblock");
625 disk_super = dm_block_data(sblock);
627 /* Verify the data block size hasn't changed */
628 if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
629 DMERR("changing the data block size (from %u to %llu) is not supported",
630 le32_to_cpu(disk_super->data_block_size),
631 (unsigned long long)pmd->data_block_size);
633 goto bad_unlock_sblock;
636 r = __check_incompat_features(disk_super, pmd);
638 goto bad_unlock_sblock;
640 r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
641 disk_super->metadata_space_map_root,
642 sizeof(disk_super->metadata_space_map_root),
643 &pmd->tm, &pmd->metadata_sm);
645 DMERR("tm_open_with_sm failed");
646 goto bad_unlock_sblock;
649 pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
650 sizeof(disk_super->data_space_map_root));
651 if (IS_ERR(pmd->data_sm)) {
652 DMERR("sm_disk_open failed");
653 r = PTR_ERR(pmd->data_sm);
657 pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
659 DMERR("could not create non-blocking clone tm");
661 goto bad_cleanup_data_sm;
	/*
	 * For the metadata-open path, setting the root here is redundant,
	 * because it will be set again in __begin_transaction().  But the
	 * pool-abort path really does need the last transaction's root, so
	 * that it doesn't access a broken btree.
	 */
670 pmd->root = le64_to_cpu(disk_super->data_mapping_root);
671 pmd->details_root = le64_to_cpu(disk_super->device_details_root);
673 __setup_btree_details(pmd);
674 dm_bm_unlock(sblock);
679 dm_sm_destroy(pmd->data_sm);
681 dm_tm_destroy(pmd->tm);
682 dm_sm_destroy(pmd->metadata_sm);
684 dm_bm_unlock(sblock);
689 static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
693 r = __superblock_all_zeroes(pmd->bm, &unformatted);
698 return format_device ? __format_metadata(pmd) : -EPERM;
700 return __open_metadata(pmd);
703 static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
707 pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
708 THIN_METADATA_CACHE_SIZE,
709 THIN_MAX_CONCURRENT_LOCKS);
710 if (IS_ERR(pmd->bm)) {
711 DMERR("could not create block manager");
712 r = PTR_ERR(pmd->bm);
717 r = __open_or_format_metadata(pmd, format_device);
719 dm_block_manager_destroy(pmd->bm);
726 static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
728 dm_sm_destroy(pmd->data_sm);
729 dm_sm_destroy(pmd->metadata_sm);
730 dm_tm_destroy(pmd->nb_tm);
731 dm_tm_destroy(pmd->tm);
	dm_block_manager_destroy(pmd->bm);
}
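/*
 * Reads the superblock and loads the cached copies of its fields (time,
 * roots, transaction id, flags) that the new transaction works on.
 */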
735 static int __begin_transaction(struct dm_pool_metadata *pmd)
738 struct thin_disk_superblock *disk_super;
739 struct dm_block *sblock;
	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
745 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
746 &sb_validator, &sblock);
750 disk_super = dm_block_data(sblock);
751 pmd->time = le32_to_cpu(disk_super->time);
752 pmd->root = le64_to_cpu(disk_super->data_mapping_root);
753 pmd->details_root = le64_to_cpu(disk_super->device_details_root);
754 pmd->trans_id = le64_to_cpu(disk_super->trans_id);
755 pmd->flags = le32_to_cpu(disk_super->flags);
756 pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
758 dm_bm_unlock(sblock);
762 static int __write_changed_details(struct dm_pool_metadata *pmd)
765 struct dm_thin_device *td, *tmp;
766 struct disk_device_details details;
769 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
775 details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
776 details.transaction_id = cpu_to_le64(td->transaction_id);
777 details.creation_time = cpu_to_le32(td->creation_time);
778 details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
779 __dm_bless_for_disk(&details);
781 r = dm_btree_insert(&pmd->details_info, pmd->details_root,
782 &key, &details, &pmd->details_root);
797 static int __commit_transaction(struct dm_pool_metadata *pmd)
800 size_t metadata_len, data_len;
801 struct thin_disk_superblock *disk_super;
802 struct dm_block *sblock;
805 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
807 BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
809 r = __write_changed_details(pmd);
813 r = dm_sm_commit(pmd->data_sm);
817 r = dm_tm_pre_commit(pmd->tm);
821 r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
825 r = dm_sm_root_size(pmd->data_sm, &data_len);
829 r = save_sm_roots(pmd);
833 r = superblock_lock(pmd, &sblock);
837 disk_super = dm_block_data(sblock);
838 disk_super->time = cpu_to_le32(pmd->time);
839 disk_super->data_mapping_root = cpu_to_le64(pmd->root);
840 disk_super->device_details_root = cpu_to_le64(pmd->details_root);
841 disk_super->trans_id = cpu_to_le64(pmd->trans_id);
842 disk_super->flags = cpu_to_le32(pmd->flags);
844 copy_sm_roots(pmd, disk_super);
	return dm_tm_commit(pmd->tm, sblock);
}
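/*
 * Reserve a tenth of the metadata device for commit overhead, capped at
 * 4096 blocks (16M with 4K metadata blocks); free-space reports subtract
 * this reserve.
 */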
849 static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
853 dm_block_t max_blocks = 4096; /* 16M */
855 r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
857 DMERR("could not get size of metadata device");
858 pmd->metadata_reserve = max_blocks;
860 pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
863 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
864 sector_t data_block_size,
868 struct dm_pool_metadata *pmd;
870 pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
872 DMERR("could not allocate metadata struct");
873 return ERR_PTR(-ENOMEM);
876 init_rwsem(&pmd->root_lock);
878 INIT_LIST_HEAD(&pmd->thin_devices);
879 pmd->fail_io = false;
881 pmd->data_block_size = data_block_size;
883 r = __create_persistent_data_objects(pmd, format_device);
889 r = __begin_transaction(pmd);
891 if (dm_pool_metadata_close(pmd) < 0)
892 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
896 __set_metadata_reserve(pmd);
901 int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
904 unsigned open_devices = 0;
905 struct dm_thin_device *td, *tmp;
907 down_read(&pmd->root_lock);
908 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
916 up_read(&pmd->root_lock);
919 DMERR("attempt to close pmd when %u device(s) are still open",
924 if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
925 r = __commit_transaction(pmd);
927 DMWARN("%s: __commit_transaction() failed, error = %d",
932 __destroy_persistent_data_objects(pmd);
939 * __open_device: Returns @td corresponding to device with id @dev,
940 * creating it if @create is set and incrementing @td->open_count.
941 * On failure, @td is undefined.
943 static int __open_device(struct dm_pool_metadata *pmd,
944 dm_thin_id dev, int create,
945 struct dm_thin_device **td)
948 struct dm_thin_device *td2;
950 struct disk_device_details details_le;
953 * If the device is already open, return it.
955 list_for_each_entry(td2, &pmd->thin_devices, list)
956 if (td2->id == dev) {
958 * May not create an already-open device.
969 * Check the device exists.
971 r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
974 if (r != -ENODATA || !create)
981 details_le.mapped_blocks = 0;
982 details_le.transaction_id = cpu_to_le64(pmd->trans_id);
983 details_le.creation_time = cpu_to_le32(pmd->time);
984 details_le.snapshotted_time = cpu_to_le32(pmd->time);
987 *td = kmalloc(sizeof(**td), GFP_NOIO);
993 (*td)->open_count = 1;
994 (*td)->changed = changed;
995 (*td)->aborted_with_changes = false;
996 (*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
997 (*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
998 (*td)->creation_time = le32_to_cpu(details_le.creation_time);
999 (*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);
1001 list_add(&(*td)->list, &pmd->thin_devices);
static void __close_device(struct dm_thin_device *td)
{
	--td->open_count;
}
1011 static int __create_thin(struct dm_pool_metadata *pmd,
1015 dm_block_t dev_root;
1017 struct disk_device_details details_le;
1018 struct dm_thin_device *td;
1021 r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
1027 * Create an empty btree for the mappings.
1029 r = dm_btree_empty(&pmd->bl_info, &dev_root);
1034 * Insert it into the main mapping tree.
1036 value = cpu_to_le64(dev_root);
1037 __dm_bless_for_disk(&value);
1038 r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
1040 dm_btree_del(&pmd->bl_info, dev_root);
1044 r = __open_device(pmd, dev, 1, &td);
1046 dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1047 dm_btree_del(&pmd->bl_info, dev_root);
1055 int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
1059 down_write(&pmd->root_lock);
1061 r = __create_thin(pmd, dev);
1062 up_write(&pmd->root_lock);
1067 static int __set_snapshot_details(struct dm_pool_metadata *pmd,
1068 struct dm_thin_device *snap,
1069 dm_thin_id origin, uint32_t time)
1072 struct dm_thin_device *td;
1074 r = __open_device(pmd, origin, 0, &td);
1079 td->snapshotted_time = time;
1081 snap->mapped_blocks = td->mapped_blocks;
1082 snap->snapshotted_time = time;
1088 static int __create_snap(struct dm_pool_metadata *pmd,
1089 dm_thin_id dev, dm_thin_id origin)
1092 dm_block_t origin_root;
1093 uint64_t key = origin, dev_key = dev;
1094 struct dm_thin_device *td;
1095 struct disk_device_details details_le;
1098 /* check this device is unused */
1099 r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
1100 &dev_key, &details_le);
1104 /* find the mapping tree for the origin */
1105 r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
1108 origin_root = le64_to_cpu(value);
1110 /* clone the origin, an inc will do */
1111 dm_tm_inc(pmd->tm, origin_root);
1113 /* insert into the main mapping tree */
1114 value = cpu_to_le64(origin_root);
1115 __dm_bless_for_disk(&value);
1117 r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
1119 dm_tm_dec(pmd->tm, origin_root);
1125 r = __open_device(pmd, dev, 1, &td);
1129 r = __set_snapshot_details(pmd, td, origin, pmd->time);
1138 dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1139 dm_btree_remove(&pmd->details_info, pmd->details_root,
1140 &key, &pmd->details_root);
1144 int dm_pool_create_snap(struct dm_pool_metadata *pmd,
1150 down_write(&pmd->root_lock);
1152 r = __create_snap(pmd, dev, origin);
1153 up_write(&pmd->root_lock);
1158 static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
1162 struct dm_thin_device *td;
1164 /* TODO: failure should mark the transaction invalid */
1165 r = __open_device(pmd, dev, 0, &td);
1169 if (td->open_count > 1) {
1174 list_del(&td->list);
1176 r = dm_btree_remove(&pmd->details_info, pmd->details_root,
1177 &key, &pmd->details_root);
1181 r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
1188 int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
1193 down_write(&pmd->root_lock);
1195 r = __delete_device(pmd, dev);
1196 up_write(&pmd->root_lock);
1201 int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
1202 uint64_t current_id,
1207 down_write(&pmd->root_lock);
1212 if (pmd->trans_id != current_id) {
1213 DMERR("mismatched transaction id");
1217 pmd->trans_id = new_id;
1221 up_write(&pmd->root_lock);
1226 int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
1231 down_read(&pmd->root_lock);
1232 if (!pmd->fail_io) {
1233 *result = pmd->trans_id;
1236 up_read(&pmd->root_lock);
1241 static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1244 struct thin_disk_superblock *disk_super;
1245 struct dm_block *copy, *sblock;
1246 dm_block_t held_root;
1249 * We commit to ensure the btree roots which we increment in a
1250 * moment are up to date.
1252 __commit_transaction(pmd);
1255 * Copy the superblock.
1257 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
1258 r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
1259 &sb_validator, ©, &inc);
1265 held_root = dm_block_location(copy);
1266 disk_super = dm_block_data(copy);
1268 if (le64_to_cpu(disk_super->held_root)) {
1269 DMWARN("Pool metadata snapshot already exists: release this before taking another.");
1271 dm_tm_dec(pmd->tm, held_root);
1272 dm_tm_unlock(pmd->tm, copy);
1277 * Wipe the spacemap since we're not publishing this.
1279 memset(&disk_super->data_space_map_root, 0,
1280 sizeof(disk_super->data_space_map_root));
1281 memset(&disk_super->metadata_space_map_root, 0,
1282 sizeof(disk_super->metadata_space_map_root));
1285 * Increment the data structures that need to be preserved.
1287 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
1288 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
1289 dm_tm_unlock(pmd->tm, copy);
1292 * Write the held root into the superblock.
1294 r = superblock_lock(pmd, &sblock);
1296 dm_tm_dec(pmd->tm, held_root);
1300 disk_super = dm_block_data(sblock);
1301 disk_super->held_root = cpu_to_le64(held_root);
1302 dm_bm_unlock(sblock);
1306 int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
1310 down_write(&pmd->root_lock);
1312 r = __reserve_metadata_snap(pmd);
1313 up_write(&pmd->root_lock);
1318 static int __release_metadata_snap(struct dm_pool_metadata *pmd)
1321 struct thin_disk_superblock *disk_super;
1322 struct dm_block *sblock, *copy;
1323 dm_block_t held_root;
1325 r = superblock_lock(pmd, &sblock);
1329 disk_super = dm_block_data(sblock);
1330 held_root = le64_to_cpu(disk_super->held_root);
1331 disk_super->held_root = cpu_to_le64(0);
1333 dm_bm_unlock(sblock);
1336 DMWARN("No pool metadata snapshot found: nothing to release.");
1340 r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, ©);
1344 disk_super = dm_block_data(copy);
1345 dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
1346 dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
1347 dm_sm_dec_block(pmd->metadata_sm, held_root);
1349 dm_tm_unlock(pmd->tm, copy);
1354 int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
1358 down_write(&pmd->root_lock);
1360 r = __release_metadata_snap(pmd);
1361 up_write(&pmd->root_lock);
1366 static int __get_metadata_snap(struct dm_pool_metadata *pmd,
1370 struct thin_disk_superblock *disk_super;
1371 struct dm_block *sblock;
1373 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
1374 &sb_validator, &sblock);
1378 disk_super = dm_block_data(sblock);
1379 *result = le64_to_cpu(disk_super->held_root);
1381 dm_bm_unlock(sblock);
1386 int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
1391 down_read(&pmd->root_lock);
1393 r = __get_metadata_snap(pmd, result);
1394 up_read(&pmd->root_lock);
1399 int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
1400 struct dm_thin_device **td)
1404 down_write(&pmd->root_lock);
1406 r = __open_device(pmd, dev, 0, td);
1407 up_write(&pmd->root_lock);
1412 int dm_pool_close_thin_device(struct dm_thin_device *td)
1414 down_write(&td->pmd->root_lock);
1416 up_write(&td->pmd->root_lock);
1421 dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
1427 * Check whether @time (of block creation) is older than @td's last snapshot.
1428 * If so then the associated block is shared with the last snapshot device.
1429 * Any block on a device created *after* the device last got snapshotted is
1430 * necessarily not shared.
1432 static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
1434 return td->snapshotted_time > time;
1437 static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
1438 struct dm_thin_lookup_result *result)
1440 uint64_t block_time = 0;
1441 dm_block_t exception_block;
1442 uint32_t exception_time;
1444 block_time = le64_to_cpu(value);
1445 unpack_block_time(block_time, &exception_block, &exception_time);
1446 result->block = exception_block;
1447 result->shared = __snapshotted_since(td, exception_time);
1450 static int __find_block(struct dm_thin_device *td, dm_block_t block,
1451 int can_issue_io, struct dm_thin_lookup_result *result)
1455 struct dm_pool_metadata *pmd = td->pmd;
1456 dm_block_t keys[2] = { td->id, block };
1457 struct dm_btree_info *info;
1462 info = &pmd->nb_info;
1464 r = dm_btree_lookup(info, pmd->root, keys, &value);
1466 unpack_lookup_result(td, value, result);
1471 int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
1472 int can_issue_io, struct dm_thin_lookup_result *result)
1475 struct dm_pool_metadata *pmd = td->pmd;
1477 down_read(&pmd->root_lock);
1479 up_read(&pmd->root_lock);
1483 r = __find_block(td, block, can_issue_io, result);
1485 up_read(&pmd->root_lock);
1489 static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
1491 struct dm_thin_lookup_result *result)
1495 struct dm_pool_metadata *pmd = td->pmd;
1496 dm_block_t keys[2] = { td->id, block };
1498 r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
1500 unpack_lookup_result(td, value, result);
1505 static int __find_mapped_range(struct dm_thin_device *td,
1506 dm_block_t begin, dm_block_t end,
1507 dm_block_t *thin_begin, dm_block_t *thin_end,
1508 dm_block_t *pool_begin, bool *maybe_shared)
1511 dm_block_t pool_end;
1512 struct dm_thin_lookup_result lookup;
1517 r = __find_next_mapped_block(td, begin, &begin, &lookup);
1524 *thin_begin = begin;
1525 *pool_begin = lookup.block;
1526 *maybe_shared = lookup.shared;
1529 pool_end = *pool_begin + 1;
1530 while (begin != end) {
1531 r = __find_block(td, begin, true, &lookup);
1539 if ((lookup.block != pool_end) ||
1540 (lookup.shared != *maybe_shared))
1551 int dm_thin_find_mapped_range(struct dm_thin_device *td,
1552 dm_block_t begin, dm_block_t end,
1553 dm_block_t *thin_begin, dm_block_t *thin_end,
1554 dm_block_t *pool_begin, bool *maybe_shared)
1557 struct dm_pool_metadata *pmd = td->pmd;
1559 down_read(&pmd->root_lock);
1560 if (!pmd->fail_io) {
1561 r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
1562 pool_begin, maybe_shared);
1564 up_read(&pmd->root_lock);
1569 static int __insert(struct dm_thin_device *td, dm_block_t block,
1570 dm_block_t data_block)
1574 struct dm_pool_metadata *pmd = td->pmd;
1575 dm_block_t keys[2] = { td->id, block };
1577 value = cpu_to_le64(pack_block_time(data_block, pmd->time));
1578 __dm_bless_for_disk(&value);
1580 r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
1581 &pmd->root, &inserted);
1587 td->mapped_blocks++;
1592 int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
1593 dm_block_t data_block)
1597 down_write(&td->pmd->root_lock);
1598 if (!td->pmd->fail_io)
1599 r = __insert(td, block, data_block);
1600 up_write(&td->pmd->root_lock);
1605 static int __remove(struct dm_thin_device *td, dm_block_t block)
1608 struct dm_pool_metadata *pmd = td->pmd;
1609 dm_block_t keys[2] = { td->id, block };
1611 r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
1615 td->mapped_blocks--;
1621 static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
1624 unsigned count, total_count = 0;
1625 struct dm_pool_metadata *pmd = td->pmd;
1626 dm_block_t keys[1] = { td->id };
1628 dm_block_t mapping_root;
1631 * Find the mapping tree
1633 r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);
1638 * Remove from the mapping tree, taking care to inc the
1639 * ref count so it doesn't get deleted.
1641 mapping_root = le64_to_cpu(value);
1642 dm_tm_inc(pmd->tm, mapping_root);
1643 r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);
	/*
	 * dm_btree_remove_leaves() stops at the first unmapped entry, so we
	 * have to loop round finding mapped ranges.
	 */
1651 while (begin < end) {
1652 r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
1662 r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
1666 total_count += count;
1669 td->mapped_blocks -= total_count;
1673 * Reinsert the mapping tree.
1675 value = cpu_to_le64(mapping_root);
1676 __dm_bless_for_disk(&value);
1677 return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
1680 int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
1684 down_write(&td->pmd->root_lock);
1685 if (!td->pmd->fail_io)
1686 r = __remove(td, block);
1687 up_write(&td->pmd->root_lock);
1692 int dm_thin_remove_range(struct dm_thin_device *td,
1693 dm_block_t begin, dm_block_t end)
1697 down_write(&td->pmd->root_lock);
1698 if (!td->pmd->fail_io)
1699 r = __remove_range(td, begin, end);
1700 up_write(&td->pmd->root_lock);
1705 int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1710 down_read(&pmd->root_lock);
1711 r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
1713 *result = (ref_count > 1);
1714 up_read(&pmd->root_lock);
1719 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
1723 down_write(&pmd->root_lock);
1724 for (; b != e; b++) {
1725 r = dm_sm_inc_block(pmd->data_sm, b);
1729 up_write(&pmd->root_lock);
1734 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
1738 down_write(&pmd->root_lock);
1739 for (; b != e; b++) {
1740 r = dm_sm_dec_block(pmd->data_sm, b);
1744 up_write(&pmd->root_lock);
1749 bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
1753 down_read(&td->pmd->root_lock);
1755 up_read(&td->pmd->root_lock);
1760 bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
1763 struct dm_thin_device *td, *tmp;
1765 down_read(&pmd->root_lock);
1766 list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
1772 up_read(&pmd->root_lock);
1777 bool dm_thin_aborted_changes(struct dm_thin_device *td)
1781 down_read(&td->pmd->root_lock);
1782 r = td->aborted_with_changes;
1783 up_read(&td->pmd->root_lock);
1788 int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
1792 down_write(&pmd->root_lock);
1794 r = dm_sm_new_block(pmd->data_sm, result);
1795 up_write(&pmd->root_lock);
1800 int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
1804 down_write(&pmd->root_lock);
1808 r = __commit_transaction(pmd);
1813 * Open the next transaction.
1815 r = __begin_transaction(pmd);
1817 up_write(&pmd->root_lock);
1821 static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
1823 struct dm_thin_device *td;
1825 list_for_each_entry(td, &pmd->thin_devices, list)
		td->aborted_with_changes = td->changed;
}
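/*
 * Abort throws away uncommitted changes by rebuilding the
 * persistent-data objects from the last committed superblock; if that
 * rebuild fails, the pool drops into fail_io mode.
 */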
1829 int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
1833 down_write(&pmd->root_lock);
1837 __set_abort_with_changes_flags(pmd);
1838 __destroy_persistent_data_objects(pmd);
1839 r = __create_persistent_data_objects(pmd, false);
1841 pmd->fail_io = true;
1844 up_write(&pmd->root_lock);
1849 int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
1853 down_read(&pmd->root_lock);
1855 r = dm_sm_get_nr_free(pmd->data_sm, result);
1856 up_read(&pmd->root_lock);
1861 int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
1866 down_read(&pmd->root_lock);
1868 r = dm_sm_get_nr_free(pmd->metadata_sm, result);
1871 if (*result < pmd->metadata_reserve)
1874 *result -= pmd->metadata_reserve;
1876 up_read(&pmd->root_lock);
1881 int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
1886 down_read(&pmd->root_lock);
1888 r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
1889 up_read(&pmd->root_lock);
1894 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
1898 down_read(&pmd->root_lock);
1900 r = dm_sm_get_nr_blocks(pmd->data_sm, result);
1901 up_read(&pmd->root_lock);
1906 int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
1909 struct dm_pool_metadata *pmd = td->pmd;
1911 down_read(&pmd->root_lock);
1912 if (!pmd->fail_io) {
1913 *result = td->mapped_blocks;
1916 up_read(&pmd->root_lock);
1921 static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
1925 dm_block_t thin_root;
1926 struct dm_pool_metadata *pmd = td->pmd;
1928 r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
1932 thin_root = le64_to_cpu(value_le);
1934 return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
1937 int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
1941 struct dm_pool_metadata *pmd = td->pmd;
1943 down_read(&pmd->root_lock);
1945 r = __highest_block(td, result);
1946 up_read(&pmd->root_lock);
1951 static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
1954 dm_block_t old_count;
1956 r = dm_sm_get_nr_blocks(sm, &old_count);
1960 if (new_count == old_count)
1963 if (new_count < old_count) {
1964 DMERR("cannot reduce size of space map");
1968 return dm_sm_extend(sm, new_count - old_count);
1971 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
1975 down_write(&pmd->root_lock);
1977 r = __resize_space_map(pmd->data_sm, new_count);
1978 up_write(&pmd->root_lock);
1983 int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
1987 down_write(&pmd->root_lock);
1988 if (!pmd->fail_io) {
1989 r = __resize_space_map(pmd->metadata_sm, new_count);
1991 __set_metadata_reserve(pmd);
1993 up_write(&pmd->root_lock);
1998 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
2000 down_write(&pmd->root_lock);
2001 dm_bm_set_read_only(pmd->bm);
2002 up_write(&pmd->root_lock);
2005 void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
2007 down_write(&pmd->root_lock);
2008 dm_bm_set_read_write(pmd->bm);
2009 up_write(&pmd->root_lock);
2012 int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
2013 dm_block_t threshold,
2014 dm_sm_threshold_fn fn,
2019 down_write(&pmd->root_lock);
2020 r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
2021 up_write(&pmd->root_lock);
2026 int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
2029 struct dm_block *sblock;
2030 struct thin_disk_superblock *disk_super;
2032 down_write(&pmd->root_lock);
2033 pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
2035 r = superblock_lock(pmd, &sblock);
2037 DMERR("couldn't read superblock");
2041 disk_super = dm_block_data(sblock);
2042 disk_super->flags = cpu_to_le32(pmd->flags);
2044 dm_bm_unlock(sblock);
2046 up_write(&pmd->root_lock);
2050 bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
2054 down_read(&pmd->root_lock);
2055 needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
2056 up_read(&pmd->root_lock);
2061 void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
2063 down_read(&pmd->root_lock);
2065 dm_tm_issue_prefetches(pmd->tm);
2066 up_read(&pmd->root_lock);