/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-thin-metadata.h"
#include "persistent-data/dm-btree.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"

#include <linux/list.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
/*--------------------------------------------------------------------------
 * As far as the metadata goes, there are:
 *
 * - A superblock in block zero, taking up fewer than 512 bytes for
 *   atomic writes.
 *
 * - A space map managing the metadata blocks.
 *
 * - A space map managing the data blocks.
 *
 * - A btree mapping our internal thin dev ids onto struct disk_device_details.
 *
 * - A hierarchical btree, with 2 levels, which effectively maps (thin
 *   dev id, virtual block) -> block_time.  Block time is a 64-bit
 *   field holding the time in the low 24 bits and the block in the top
 *   40 bits.
 *
 * BTrees consist solely of btree_nodes, each of which fills a block.
 * Some are internal nodes, whose values are __le64s pointing to other
 * nodes.  Leaf nodes can store data of any reasonable size (i.e. much
 * smaller than the block size).  The nodes consist of the header,
 * followed by an array of keys, followed by an array of values.  We have
 * to binary search on the keys, so they're all held together to help the
 * cpu cache.
 *
 * Space maps have 2 btrees:
 *
 * - One maps a uint64_t onto a struct index_entry, which points to a
 *   bitmap block and holds details about how many free entries there
 *   are, etc.
 *
 * - The bitmap blocks have a header (for the checksum).  Then the rest
 *   of the block is pairs of bits, with the meanings:
 *
 *   0 - ref count is 0
 *   1 - ref count is 1
 *   2 - ref count is 2
 *   3 - ref count is higher than 2
 *
 * - If the count is higher than 2 then the ref count is entered in a
 *   second btree that directly maps the block_address to a uint32_t ref
 *   count.
 *
 * The space map metadata variant doesn't have a bitmaps btree.  Instead
 * it has a single block's worth of index_entries.  This avoids
 * recursive issues with the bitmap btree needing to allocate space in
 * order to insert.  With a small data block size such as 64k the
 * metadata can support data devices that are hundreds of terabytes.
 *
 * The space maps allocate space linearly from front to back.  Space that
 * is freed in a transaction is never recycled within that transaction.
 * To try to avoid fragmenting _free_ space the allocator always goes
 * back and fills in gaps.
 *
 * All metadata I/O is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
 * from the block manager.
 *--------------------------------------------------------------------------*/
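/*
 * Worked example of the block_time packing described above (the values
 * are chosen purely for illustration): data block 100 at time 3 packs
 * to (100 << 24) | 3 == 0x64000003; unpacking masks off the low 24
 * bits to recover the time and shifts right by 24 to recover the
 * block.  See pack_block_time()/unpack_block_time() below.
 */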
#define DM_MSG_PREFIX   "thin metadata"

#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
#define THIN_VERSION 2
#define SECTOR_TO_BLOCK_SHIFT 3
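/*
 * A metadata block is THIN_METADATA_BLOCK_SIZE (8) 512-byte sectors,
 * i.e. 4KiB, so shifting a sector count right by SECTOR_TO_BLOCK_SHIFT
 * converts it to a count of metadata blocks, e.g. 1024 sectors >> 3 ==
 * 128 blocks (used when sizing metadata_nr_blocks below).
 */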
/*
 * For btree insert:
 *  3 for btree insert +
 *  2 for btree lookup used within space map
 * For btree remove:
 *  2 for shadow spine +
 *  4 for rebalancing 3 child nodes
 */
#define THIN_MAX_CONCURRENT_LOCKS 6
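/* i.e. the worst case is max(3 + 2, 2 + 4) == 6 concurrent locks. */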
/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128

/*
 * Little endian on-disk superblock and device details.
 */
struct thin_disk_superblock {
	__le32 csum;	/* Checksum of superblock except for this field. */

	__le32 flags;
	__le64 blocknr;	/* This block number, dm_block_t. */

	__u8 uuid[16];
	__le64 magic;
	__le32 version;
	__le32 time;

	__le64 trans_id;

	/*
	 * Root held by userspace transactions.
	 */
	__le64 held_root;

	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * 2-level btree mapping (dev_id, (dev block, time)) -> data block
	 */
	__le64 data_mapping_root;

	/*
	 * Device detail root mapping dev_id -> device_details
	 */
	__le64 device_details_root;

	__le32 data_block_size;		/* In 512-byte sectors. */

	__le32 metadata_block_size;	/* In 512-byte sectors. */
	__le64 metadata_nr_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;
} __packed;
struct disk_device_details {
	__le64 mapped_blocks;
	__le64 transaction_id;	/* When created. */
	__le32 creation_time;
	__le32 snapshotted_time;
} __packed;
struct dm_pool_metadata {
	struct hlist_node hash;

	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_space_map *data_sm;
	struct dm_transaction_manager *tm;
	struct dm_transaction_manager *nb_tm;

	/*
	 * Two-level btree.
	 * First level holds thin_dev_t.
	 * Second level holds mappings.
	 */
	struct dm_btree_info info;

	/*
	 * Non-blocking version of the above.
	 */
	struct dm_btree_info nb_info;

	/*
	 * Just the top level for deleting whole devices.
	 */
	struct dm_btree_info tl_info;

	/*
	 * Just the bottom level for creating new devices.
	 */
	struct dm_btree_info bl_info;

	/*
	 * Describes the device details btree.
	 */
	struct dm_btree_info details_info;

	struct rw_semaphore root_lock;
	uint32_t time;
	dm_block_t root;
	dm_block_t details_root;
	struct list_head thin_devices;
	uint64_t trans_id;
	unsigned long flags;
	sector_t data_block_size;

	/*
	 * We reserve a section of the metadata for commit overhead.
	 * All reported space does *not* include this.
	 */
	dm_block_t metadata_reserve;

	/*
	 * Set if a transaction has to be aborted but the attempt to roll back
	 * to the previous (good) transaction failed.  The only pool metadata
	 * operation possible in this state is the closing of the device.
	 */
	bool fail_io:1;

	/*
	 * Reading the space map roots can fail, so we read it into these
	 * buffers before the superblock is locked and updated.
	 */
	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};
struct dm_thin_device {
	struct list_head list;
	struct dm_pool_metadata *pmd;
	dm_thin_id id;

	int open_count;
	bool changed:1;
	bool aborted_with_changes:1;
	uint64_t mapped_blocks;
	uint64_t transaction_id;
	uint32_t creation_time;
	uint32_t snapshotted_time;
};
/*----------------------------------------------------------------
 * superblock validator
 *--------------------------------------------------------------*/

#define SUPERBLOCK_CSUM_XOR 160774
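/*
 * The XOR value acts as a salt for dm_bm_checksum() so that a block
 * that validates as a superblock can't also validate as some other
 * block type, each of which checksums with a different salt.
 */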
static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}
static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->magic),
		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return 0;
}
static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
/*----------------------------------------------------------------
 * Methods for the btree value types
 *--------------------------------------------------------------*/
static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
	return (b << 24) | t;
}

static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
	*b = v >> 24;
	*t = v & ((1 << 24) - 1);
}
static void data_block_inc(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_inc_block(sm, b);
}

static void data_block_dec(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_dec_block(sm, b);
}

static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;
	uint64_t b1, b2;
	uint32_t t;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);

	return b1 == b2;
}

static void subtree_inc(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	dm_tm_inc(info->tm, root);
}

static void subtree_dec(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	if (dm_btree_del(info, root))
		DMERR("btree delete failed");
}

static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));

	return v1_le == v2_le;
}

/*----------------------------------------------------------------*/
static int superblock_lock_zero(struct dm_pool_metadata *pmd,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}

static int superblock_lock(struct dm_pool_metadata *pmd,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}
static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
	int r;
	unsigned i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = 1;
	for (i = 0; i < block_size; i++) {
		if (data_le[i] != zero) {
			*result = 0;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
}
static void __setup_btree_details(struct dm_pool_metadata *pmd)
{
	pmd->info.tm = pmd->tm;
	pmd->info.levels = 2;
	pmd->info.value_type.context = pmd->data_sm;
	pmd->info.value_type.size = sizeof(__le64);
	pmd->info.value_type.inc = data_block_inc;
	pmd->info.value_type.dec = data_block_dec;
	pmd->info.value_type.equal = data_block_equal;

	memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
	pmd->nb_info.tm = pmd->nb_tm;

	pmd->tl_info.tm = pmd->tm;
	pmd->tl_info.levels = 1;
	pmd->tl_info.value_type.context = &pmd->bl_info;
	pmd->tl_info.value_type.size = sizeof(__le64);
	pmd->tl_info.value_type.inc = subtree_inc;
	pmd->tl_info.value_type.dec = subtree_dec;
	pmd->tl_info.value_type.equal = subtree_equal;

	pmd->bl_info.tm = pmd->tm;
	pmd->bl_info.levels = 1;
	pmd->bl_info.value_type.context = pmd->data_sm;
	pmd->bl_info.value_type.size = sizeof(__le64);
	pmd->bl_info.value_type.inc = data_block_inc;
	pmd->bl_info.value_type.dec = data_block_dec;
	pmd->bl_info.value_type.equal = data_block_equal;

	pmd->details_info.tm = pmd->tm;
	pmd->details_info.levels = 1;
	pmd->details_info.value_type.context = NULL;
	pmd->details_info.value_type.size = sizeof(struct disk_device_details);
	pmd->details_info.value_type.inc = NULL;
	pmd->details_info.value_type.dec = NULL;
	pmd->details_info.value_type.equal = NULL;
}
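/*
 * A sketch of how these btree infos relate (for orientation): a lookup
 * of keys {dev_id, vblock} through "info" is equivalent to looking up
 * dev_id in "tl_info" to obtain the root of a per-device subtree, then
 * looking up vblock in that subtree via "bl_info".  Hence the top
 * level's inc/dec/equal operate on subtree roots, while the bottom
 * level's operate on block_time values against the data space map.
 */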
static int save_sm_roots(struct dm_pool_metadata *pmd)
{
	int r;
	size_t len;

	r = dm_sm_root_size(pmd->metadata_sm, &len);
	if (r < 0)
		return r;

	r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->data_sm, &len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
}
static void copy_sm_roots(struct dm_pool_metadata *pmd,
			  struct thin_disk_superblock *disk)
{
	memcpy(&disk->metadata_space_map_root,
	       &pmd->metadata_space_map_root,
	       sizeof(pmd->metadata_space_map_root));

	memcpy(&disk->data_space_map_root,
	       &pmd->data_space_map_root,
	       sizeof(pmd->data_space_map_root));
}
static int __write_initial_superblock(struct dm_pool_metadata *pmd)
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;
	sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;

	if (bdev_size > THIN_METADATA_MAX_SECTORS)
		bdev_size = THIN_METADATA_MAX_SECTORS;

	r = dm_sm_commit(pmd->data_sm);

	r = dm_tm_pre_commit(pmd->tm);

	r = save_sm_roots(pmd);

	r = superblock_lock_zero(pmd, &sblock);

	disk_super = dm_block_data(sblock);
	disk_super->flags = 0;
	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
	disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(THIN_VERSION);
	disk_super->time = 0;
	disk_super->trans_id = 0;
	disk_super->held_root = 0;

	copy_sm_roots(pmd, disk_super);

	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);

	return dm_tm_commit(pmd->tm, sblock);
static int __format_metadata(struct dm_pool_metadata *pmd)
	r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				 &pmd->tm, &pmd->metadata_sm);
		DMERR("tm_create_with_sm failed");

	pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_create failed");
		r = PTR_ERR(pmd->data_sm);

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
		DMERR("could not create non-blocking clone tm");
		goto bad_cleanup_data_sm;

	__setup_btree_details(pmd);

	r = dm_btree_empty(&pmd->info, &pmd->root);
		goto bad_cleanup_nb_tm;

	r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
		DMERR("couldn't create devices root");
		goto bad_cleanup_nb_tm;

	r = __write_initial_superblock(pmd);
		goto bad_cleanup_nb_tm;

	dm_tm_destroy(pmd->nb_tm);
	dm_sm_destroy(pmd->data_sm);
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);
static int __check_incompat_features(struct thin_disk_superblock *disk_super,
				     struct dm_pool_metadata *pmd)
	features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
		DMERR("could not access metadata due to unsupported optional features (%lx).",
		      (unsigned long)features);

	/*
	 * Check for read-only metadata to skip the following RDWR checks.
	 */
	if (get_disk_ro(pmd->bdev->bd_disk))

	features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
		DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
		      (unsigned long)features);
static int __open_metadata(struct dm_pool_metadata *pmd)
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
		DMERR("couldn't read superblock");

	disk_super = dm_block_data(sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk_super->data_block_size),
		      (unsigned long long)pmd->data_block_size);
		goto bad_unlock_sblock;

	r = __check_incompat_features(disk_super, pmd);
		goto bad_unlock_sblock;

	r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			       disk_super->metadata_space_map_root,
			       sizeof(disk_super->metadata_space_map_root),
			       &pmd->tm, &pmd->metadata_sm);
		DMERR("tm_open_with_sm failed");
		goto bad_unlock_sblock;

	pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
				       sizeof(disk_super->data_space_map_root));
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_open failed");
		r = PTR_ERR(pmd->data_sm);

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
		DMERR("could not create non-blocking clone tm");
		goto bad_cleanup_data_sm;

	/*
	 * When opening the pool metadata, setting the roots here is redundant
	 * because they will be set again in __begin_transaction().  But the
	 * pool abort path really needs the last transaction's roots so as to
	 * avoid accessing a broken btree.
	 */
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);

	__setup_btree_details(pmd);
	dm_bm_unlock(sblock);

	dm_sm_destroy(pmd->data_sm);
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);
	dm_bm_unlock(sblock);
static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
	r = __superblock_all_zeroes(pmd->bm, &unformatted);

		return format_device ? __format_metadata(pmd) : -EPERM;

	return __open_metadata(pmd);

static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					  THIN_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(pmd->bm)) {
		DMERR("could not create block manager");
		r = PTR_ERR(pmd->bm);

	r = __open_or_format_metadata(pmd, format_device);
		dm_block_manager_destroy(pmd->bm);

static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
	dm_sm_destroy(pmd->data_sm);
	dm_sm_destroy(pmd->metadata_sm);
	dm_tm_destroy(pmd->nb_tm);
	dm_tm_destroy(pmd->tm);
	dm_block_manager_destroy(pmd->bm);
static int __begin_transaction(struct dm_pool_metadata *pmd)
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);

	disk_super = dm_block_data(sblock);
	pmd->time = le32_to_cpu(disk_super->time);
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
	pmd->trans_id = le64_to_cpu(disk_super->trans_id);
	pmd->flags = le32_to_cpu(disk_super->flags);
	pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);

	dm_bm_unlock(sblock);
static int __write_changed_details(struct dm_pool_metadata *pmd)
	struct dm_thin_device *td, *tmp;
	struct disk_device_details details;

	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
		details.transaction_id = cpu_to_le64(td->transaction_id);
		details.creation_time = cpu_to_le32(td->creation_time);
		details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
		__dm_bless_for_disk(&details);

		r = dm_btree_insert(&pmd->details_info, pmd->details_root,
				    &key, &details, &pmd->details_root);

static int __commit_transaction(struct dm_pool_metadata *pmd)
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);

	r = __write_changed_details(pmd);

	r = dm_sm_commit(pmd->data_sm);

	r = dm_tm_pre_commit(pmd->tm);

	r = save_sm_roots(pmd);

	r = superblock_lock(pmd, &sblock);

	disk_super = dm_block_data(sblock);
	disk_super->time = cpu_to_le32(pmd->time);
	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->trans_id = cpu_to_le64(pmd->trans_id);
	disk_super->flags = cpu_to_le32(pmd->flags);

	copy_sm_roots(pmd, disk_super);

	return dm_tm_commit(pmd->tm, sblock);
static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
{
	int r;
	dm_block_t total;
	dm_block_t max_blocks = 4096; /* 16M */

	r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
	if (r) {
		DMERR("could not get size of metadata device");
		pmd->metadata_reserve = max_blocks;
	} else
		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
}
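/*
 * Worked example (illustrative numbers): a metadata device of 10000
 * blocks reserves min(4096, 10000 / 10) == 1000 blocks for commit
 * overhead; anything 40960 blocks (160M) or larger is capped at the
 * 4096-block (16M) maximum.
 */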
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
	struct dm_pool_metadata *pmd;

	pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
		DMERR("could not allocate metadata struct");
		return ERR_PTR(-ENOMEM);

	init_rwsem(&pmd->root_lock);
	INIT_LIST_HEAD(&pmd->thin_devices);
	pmd->fail_io = false;
	pmd->data_block_size = data_block_size;

	r = __create_persistent_data_objects(pmd, format_device);

	r = __begin_transaction(pmd);
		if (dm_pool_metadata_close(pmd) < 0)
			DMWARN("%s: dm_pool_metadata_close() failed.", __func__);

	__set_metadata_reserve(pmd);

int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
	unsigned open_devices = 0;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
	up_read(&pmd->root_lock);

		DMERR("attempt to close pmd when %u device(s) are still open",

	if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
		r = __commit_transaction(pmd);
			DMWARN("%s: __commit_transaction() failed, error = %d",

	__destroy_persistent_data_objects(pmd);
/*
 * __open_device: Returns @td corresponding to device with id @dev,
 * creating it if @create is set and incrementing @td->open_count.
 * On failure, @td is undefined.
 */
static int __open_device(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, int create,
			 struct dm_thin_device **td)
	struct dm_thin_device *td2;
	struct disk_device_details details_le;

	/*
	 * If the device is already open, return it.
	 */
	list_for_each_entry(td2, &pmd->thin_devices, list)
		if (td2->id == dev) {
			/*
			 * May not create an already-open device.
			 */

	/*
	 * Check the device exists.
	 */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
	if (r != -ENODATA || !create)

	details_le.mapped_blocks = 0;
	details_le.transaction_id = cpu_to_le64(pmd->trans_id);
	details_le.creation_time = cpu_to_le32(pmd->time);
	details_le.snapshotted_time = cpu_to_le32(pmd->time);

	*td = kmalloc(sizeof(**td), GFP_NOIO);

	(*td)->open_count = 1;
	(*td)->changed = changed;
	(*td)->aborted_with_changes = false;
	(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
	(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
	(*td)->creation_time = le32_to_cpu(details_le.creation_time);
	(*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);

	list_add(&(*td)->list, &pmd->thin_devices);

static void __close_device(struct dm_thin_device *td)

static int __create_thin(struct dm_pool_metadata *pmd,
	dm_block_t dev_root;
	struct disk_device_details details_le;
	struct dm_thin_device *td;

	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,

	/*
	 * Create an empty btree for the mappings.
	 */
	r = dm_btree_empty(&pmd->bl_info, &dev_root);

	/*
	 * Insert it into the main mapping tree.
	 */
	value = cpu_to_le64(dev_root);
	__dm_bless_for_disk(&value);
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
		dm_btree_del(&pmd->bl_info, dev_root);

	r = __open_device(pmd, dev, 1, &td);
		dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
		dm_btree_del(&pmd->bl_info, dev_root);

int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
	down_write(&pmd->root_lock);
	r = __create_thin(pmd, dev);
	up_write(&pmd->root_lock);
static int __set_snapshot_details(struct dm_pool_metadata *pmd,
				  struct dm_thin_device *snap,
				  dm_thin_id origin, uint32_t time)
	struct dm_thin_device *td;

	r = __open_device(pmd, origin, 0, &td);

	td->snapshotted_time = time;

	snap->mapped_blocks = td->mapped_blocks;
	snap->snapshotted_time = time;

static int __create_snap(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, dm_thin_id origin)
	dm_block_t origin_root;
	uint64_t key = origin, dev_key = dev;
	struct dm_thin_device *td;
	struct disk_device_details details_le;

	/* check this device is unused */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &dev_key, &details_le);

	/* find the mapping tree for the origin */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
	origin_root = le64_to_cpu(value);

	/* clone the origin, an inc will do */
	dm_tm_inc(pmd->tm, origin_root);

	/* insert into the main mapping tree */
	value = cpu_to_le64(origin_root);
	__dm_bless_for_disk(&value);

	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
		dm_tm_dec(pmd->tm, origin_root);

	r = __open_device(pmd, dev, 1, &td);

	r = __set_snapshot_details(pmd, td, origin, pmd->time);

	dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	dm_btree_remove(&pmd->details_info, pmd->details_root,
			&key, &pmd->details_root);

int dm_pool_create_snap(struct dm_pool_metadata *pmd,
	down_write(&pmd->root_lock);
	r = __create_snap(pmd, dev, origin);
	up_write(&pmd->root_lock);

static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
	struct dm_thin_device *td;

	/* TODO: failure should mark the transaction invalid */
	r = __open_device(pmd, dev, 0, &td);

	if (td->open_count > 1) {

	list_del(&td->list);

	r = dm_btree_remove(&pmd->details_info, pmd->details_root,
			    &key, &pmd->details_root);

	r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);

int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
	down_write(&pmd->root_lock);
	r = __delete_device(pmd, dev);
	up_write(&pmd->root_lock);

int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t current_id,
	down_write(&pmd->root_lock);

	if (pmd->trans_id != current_id) {
		DMERR("mismatched transaction id");

	pmd->trans_id = new_id;

	up_write(&pmd->root_lock);

int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = pmd->trans_id;
	up_read(&pmd->root_lock);
static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
	struct thin_disk_superblock *disk_super;
	struct dm_block *copy, *sblock;
	dm_block_t held_root;

	/*
	 * We commit to ensure the btree roots which we increment in a
	 * moment are up to date.
	 */
	__commit_transaction(pmd);

	/*
	 * Copy the superblock.
	 */
	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
			       &sb_validator, &copy, &inc);

	held_root = dm_block_location(copy);
	disk_super = dm_block_data(copy);

	if (le64_to_cpu(disk_super->held_root)) {
		DMWARN("Pool metadata snapshot already exists: release this before taking another.");

		dm_tm_dec(pmd->tm, held_root);
		dm_tm_unlock(pmd->tm, copy);

	/*
	 * Wipe the space maps since we're not publishing this.
	 */
	memset(&disk_super->data_space_map_root, 0,
	       sizeof(disk_super->data_space_map_root));
	memset(&disk_super->metadata_space_map_root, 0,
	       sizeof(disk_super->metadata_space_map_root));

	/*
	 * Increment the data structures that need to be preserved.
	 */
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
	dm_tm_unlock(pmd->tm, copy);

	/*
	 * Write the held root into the superblock.
	 */
	r = superblock_lock(pmd, &sblock);
		dm_tm_dec(pmd->tm, held_root);

	disk_super = dm_block_data(sblock);
	disk_super->held_root = cpu_to_le64(held_root);
	dm_bm_unlock(sblock);
int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
	down_write(&pmd->root_lock);
	r = __reserve_metadata_snap(pmd);
	up_write(&pmd->root_lock);

static int __release_metadata_snap(struct dm_pool_metadata *pmd)
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock, *copy;
	dm_block_t held_root;

	r = superblock_lock(pmd, &sblock);

	disk_super = dm_block_data(sblock);
	held_root = le64_to_cpu(disk_super->held_root);
	disk_super->held_root = cpu_to_le64(0);

	dm_bm_unlock(sblock);

		DMWARN("No pool metadata snapshot found: nothing to release.");

	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);

	disk_super = dm_block_data(copy);
	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
	dm_sm_dec_block(pmd->metadata_sm, held_root);

	dm_tm_unlock(pmd->tm, copy);

int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
	down_write(&pmd->root_lock);
	r = __release_metadata_snap(pmd);
	up_write(&pmd->root_lock);

static int __get_metadata_snap(struct dm_pool_metadata *pmd,
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);

	disk_super = dm_block_data(sblock);
	*result = le64_to_cpu(disk_super->held_root);

	dm_bm_unlock(sblock);

int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
	down_read(&pmd->root_lock);
	r = __get_metadata_snap(pmd, result);
	up_read(&pmd->root_lock);

int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
			     struct dm_thin_device **td)
	down_write(&pmd->root_lock);
	r = __open_device(pmd, dev, 0, td);
	up_write(&pmd->root_lock);

int dm_pool_close_thin_device(struct dm_thin_device *td)
	down_write(&td->pmd->root_lock);
	up_write(&td->pmd->root_lock);

dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
/*
 * Check whether @time (of block creation) is older than @td's last snapshot.
 * If so then the associated block is shared with the last snapshot device.
 * Any block on a device created *after* the device last got snapshotted is
 * necessarily not shared.
 */
static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
	return td->snapshotted_time > time;
}
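/*
 * Worked example (illustrative): if td->snapshotted_time == 5, a block
 * stamped with creation time 3 predates the snapshot and so may be
 * shared with it, whereas a block stamped 7 was created afterwards and
 * cannot be shared.
 */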
static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
				 struct dm_thin_lookup_result *result)
{
	uint64_t block_time = 0;
	dm_block_t exception_block;
	uint32_t exception_time;

	block_time = le64_to_cpu(value);
	unpack_block_time(block_time, &exception_block, &exception_time);
	result->block = exception_block;
	result->shared = __snapshotted_since(td, exception_time);
}
static int __find_block(struct dm_thin_device *td, dm_block_t block,
			int can_issue_io, struct dm_thin_lookup_result *result)
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };
	struct dm_btree_info *info;

		info = &pmd->nb_info;

	r = dm_btree_lookup(info, pmd->root, keys, &value);
		unpack_lookup_result(td, value, result);

int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
		       int can_issue_io, struct dm_thin_lookup_result *result)
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
		up_read(&pmd->root_lock);

	r = __find_block(td, block, can_issue_io, result);
	up_read(&pmd->root_lock);

static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
				    struct dm_thin_lookup_result *result)
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
		unpack_lookup_result(td, value, result);

static int __find_mapped_range(struct dm_thin_device *td,
			       dm_block_t begin, dm_block_t end,
			       dm_block_t *thin_begin, dm_block_t *thin_end,
			       dm_block_t *pool_begin, bool *maybe_shared)
	dm_block_t pool_end;
	struct dm_thin_lookup_result lookup;

	r = __find_next_mapped_block(td, begin, &begin, &lookup);

	*thin_begin = begin;
	*pool_begin = lookup.block;
	*maybe_shared = lookup.shared;

	pool_end = *pool_begin + 1;
	while (begin != end) {
		r = __find_block(td, begin, true, &lookup);

		if ((lookup.block != pool_end) ||
		    (lookup.shared != *maybe_shared))

int dm_thin_find_mapped_range(struct dm_thin_device *td,
			      dm_block_t begin, dm_block_t end,
			      dm_block_t *thin_begin, dm_block_t *thin_end,
			      dm_block_t *pool_begin, bool *maybe_shared)
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
					pool_begin, maybe_shared);
	up_read(&pmd->root_lock);

static int __insert(struct dm_thin_device *td, dm_block_t block,
		    dm_block_t data_block)
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	value = cpu_to_le64(pack_block_time(data_block, pmd->time));
	__dm_bless_for_disk(&value);

	r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
				   &pmd->root, &inserted);

	td->mapped_blocks++;

int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
			 dm_block_t data_block)
	down_write(&td->pmd->root_lock);
	if (!td->pmd->fail_io)
		r = __insert(td, block, data_block);
	up_write(&td->pmd->root_lock);

static int __remove(struct dm_thin_device *td, dm_block_t block)
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);

	td->mapped_blocks--;
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
	unsigned count, total_count = 0;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[1] = { td->id };
	dm_block_t mapping_root;

	/*
	 * Find the mapping tree.
	 */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);

	/*
	 * Remove from the mapping tree, taking care to inc the
	 * ref count so it doesn't get deleted.
	 */
	mapping_root = le64_to_cpu(value);
	dm_tm_inc(pmd->tm, mapping_root);
	r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);

	/*
	 * dm_btree_remove_leaves() stops at the first unmapped entry, so
	 * we have to loop round finding mapped ranges.
	 */
	while (begin < end) {
		r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);

		r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);

		total_count += count;

	td->mapped_blocks -= total_count;

	/*
	 * Reinsert the mapping tree.
	 */
	value = cpu_to_le64(mapping_root);
	__dm_bless_for_disk(&value);
	return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
	down_write(&td->pmd->root_lock);
	if (!td->pmd->fail_io)
		r = __remove(td, block);
	up_write(&td->pmd->root_lock);

int dm_thin_remove_range(struct dm_thin_device *td,
			 dm_block_t begin, dm_block_t end)
	down_write(&td->pmd->root_lock);
	if (!td->pmd->fail_io)
		r = __remove_range(td, begin, end);
	up_write(&td->pmd->root_lock);

int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
	down_read(&pmd->root_lock);
	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
		*result = (ref_count > 1);
	up_read(&pmd->root_lock);

int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
	down_write(&pmd->root_lock);
	for (; b != e; b++) {
		r = dm_sm_inc_block(pmd->data_sm, b);
	up_write(&pmd->root_lock);

int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
	down_write(&pmd->root_lock);
	for (; b != e; b++) {
		r = dm_sm_dec_block(pmd->data_sm, b);
	up_write(&pmd->root_lock);

bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
	down_read(&td->pmd->root_lock);
	up_read(&td->pmd->root_lock);

bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
	up_read(&pmd->root_lock);

bool dm_thin_aborted_changes(struct dm_thin_device *td)
	down_read(&td->pmd->root_lock);
	r = td->aborted_with_changes;
	up_read(&td->pmd->root_lock);

int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
	down_write(&pmd->root_lock);
	r = dm_sm_new_block(pmd->data_sm, result);
	up_write(&pmd->root_lock);

int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
	down_write(&pmd->root_lock);

	r = __commit_transaction(pmd);

	/*
	 * Open the next transaction.
	 */
	r = __begin_transaction(pmd);

	up_write(&pmd->root_lock);

static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
	struct dm_thin_device *td;

	list_for_each_entry(td, &pmd->thin_devices, list)
		td->aborted_with_changes = td->changed;

int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
	down_write(&pmd->root_lock);

	__set_abort_with_changes_flags(pmd);
	__destroy_persistent_data_objects(pmd);
	r = __create_persistent_data_objects(pmd, false);
		pmd->fail_io = true;

	up_write(&pmd->root_lock);

int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_free(pmd->data_sm, result);
	up_read(&pmd->root_lock);
int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_free(pmd->metadata_sm, result);

		if (*result < pmd->metadata_reserve)
			*result = 0;
		else
			*result -= pmd->metadata_reserve;

	up_read(&pmd->root_lock);
int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_blocks(pmd->data_sm, result);
	up_read(&pmd->root_lock);

int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = td->mapped_blocks;
	up_read(&pmd->root_lock);

static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
	dm_block_t thin_root;
	struct dm_pool_metadata *pmd = td->pmd;

	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);

	thin_root = le64_to_cpu(value_le);

	return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);

int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	r = __highest_block(td, result);
	up_read(&pmd->root_lock);
static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
{
	int r;
	dm_block_t old_count;

	r = dm_sm_get_nr_blocks(sm, &old_count);
	if (r)
		return r;

	if (new_count == old_count)
		return 0;

	if (new_count < old_count) {
		DMERR("cannot reduce size of space map");
		return -EINVAL;
	}

	return dm_sm_extend(sm, new_count - old_count);
}
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
	down_write(&pmd->root_lock);
	r = __resize_space_map(pmd->data_sm, new_count);
	up_write(&pmd->root_lock);

int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
	down_write(&pmd->root_lock);
	if (!pmd->fail_io) {
		r = __resize_space_map(pmd->metadata_sm, new_count);
			__set_metadata_reserve(pmd);
	up_write(&pmd->root_lock);

void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
	down_write(&pmd->root_lock);
	dm_bm_set_read_only(pmd->bm);
	up_write(&pmd->root_lock);

void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
	down_write(&pmd->root_lock);
	dm_bm_set_read_write(pmd->bm);
	up_write(&pmd->root_lock);

int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
					dm_block_t threshold,
					dm_sm_threshold_fn fn,
	down_write(&pmd->root_lock);
	r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
	up_write(&pmd->root_lock);

int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;

	down_write(&pmd->root_lock);
	pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;

	r = superblock_lock(pmd, &sblock);
		DMERR("couldn't lock superblock");

	disk_super = dm_block_data(sblock);
	disk_super->flags = cpu_to_le32(pmd->flags);

	dm_bm_unlock(sblock);

	up_write(&pmd->root_lock);

bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
	down_read(&pmd->root_lock);
	needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
	up_read(&pmd->root_lock);

void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
	down_read(&pmd->root_lock);
	dm_tm_issue_prefetches(pmd->tm);
	up_read(&pmd->root_lock);