2 #include "persistent-data/dm-transaction-manager.h"
3 #include "persistent-data/dm-bitset.h"
4 #include "persistent-data/dm-space-map.h"
6 #include <linux/dm-io.h>
7 #include <linux/dm-kcopyd.h>
8 #include <linux/init.h>
9 #include <linux/mempool.h>
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
14 #define DM_MSG_PREFIX "era"
16 #define SUPERBLOCK_LOCATION 0
17 #define SUPERBLOCK_MAGIC 2126579579
18 #define SUPERBLOCK_CSUM_XOR 146538381
19 #define MIN_ERA_VERSION 1
20 #define MAX_ERA_VERSION 1
21 #define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION
22 #define MIN_BLOCK_SIZE 8
24 /*----------------------------------------------------------------
26 *--------------------------------------------------------------*/
27 struct writeset_metadata {
33 struct writeset_metadata md;
	 * An in core copy of the bits to save constantly doing look ups on the disk.
43 * This does not free off the on disk bitset as this will normally be done
44 * after digesting into the era array.
46 static void writeset_free(struct writeset *ws)
52 static int setup_on_disk_bitset(struct dm_disk_bitset *info,
53 unsigned nr_bits, dm_block_t *root)
57 r = dm_bitset_empty(info, root);
61 return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
64 static size_t bitset_size(unsigned nr_bits)
66 return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
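/*
 * Illustrative arithmetic (not from the original source): a 1 TiB origin
 * carved into 4 MiB era blocks has 262144 blocks, so the in core bitset
 * above costs 262144 bits, i.e. 32 KiB of vzalloc'd memory.
 */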
70 * Allocates memory for the in core bitset.
72 static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
74 ws->bits = vzalloc(bitset_size(nr_blocks));
76 DMERR("%s: couldn't allocate in memory bitset", __func__);
84 * Wipes the in-core bitset, and creates a new on disk bitset.
86 static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
91 memset(ws->bits, 0, bitset_size(nr_blocks));
93 ws->md.nr_bits = nr_blocks;
94 r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
96 DMERR("%s: setup_on_disk_bitset failed", __func__);
103 static bool writeset_marked(struct writeset *ws, dm_block_t block)
105 return test_bit(block, ws->bits);
108 static int writeset_marked_on_disk(struct dm_disk_bitset *info,
109 struct writeset_metadata *m, dm_block_t block,
112 dm_block_t old = m->root;
115 * The bitset was flushed when it was archived, so we know there'll
116 * be no change to the root.
118 int r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
120 DMERR("%s: dm_bitset_test_bit failed", __func__);
124 BUG_ON(m->root != old);
130 * Returns < 0 on error, 0 if the bit wasn't previously set, 1 if it was.
132 static int writeset_test_and_set(struct dm_disk_bitset *info,
133 struct writeset *ws, uint32_t block)
137 if (!test_bit(block, ws->bits)) {
138 r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
140 /* FIXME: fail mode */
150 /*----------------------------------------------------------------
151 * On disk metadata layout
152 *--------------------------------------------------------------*/
153 #define SPACE_MAP_ROOT_SIZE 128
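/*
 * A rough sketch of how the structures below hang together (illustrative,
 * not a byte-accurate layout):
 *
 *   superblock (block 0)
 *     +- metadata space map root   (for the metadata device itself)
 *     +- current writeset          (nr_bits + on disk bitset root)
 *     +- writeset tree root        btree: era -> struct writeset_disk
 *     +- era array root            dm-array: block index -> era (__le32)
 *     +- metadata snap             block nr of a superblock clone, or 0
 */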
156 struct writeset_disk {
161 struct superblock_disk {
170 __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
172 __le32 data_block_size;
173 __le32 metadata_block_size;
177 struct writeset_disk current_writeset;
180 * Only these two fields are valid within the metadata snapshot.
182 __le64 writeset_tree_root;
183 __le64 era_array_root;
185 __le64 metadata_snap;
188 /*----------------------------------------------------------------
189 * Superblock validation
190 *--------------------------------------------------------------*/
191 static void sb_prepare_for_write(struct dm_block_validator *v,
193 size_t sb_block_size)
195 struct superblock_disk *disk = dm_block_data(b);
197 disk->blocknr = cpu_to_le64(dm_block_location(b));
198 disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags,
199 sb_block_size - sizeof(__le32),
200 SUPERBLOCK_CSUM_XOR));
203 static int check_metadata_version(struct superblock_disk *disk)
205 uint32_t metadata_version = le32_to_cpu(disk->version);
206 if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) {
207 DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
208 metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION);
215 static int sb_check(struct dm_block_validator *v,
217 size_t sb_block_size)
219 struct superblock_disk *disk = dm_block_data(b);
222 if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) {
223 DMERR("sb_check failed: blocknr %llu: wanted %llu",
224 le64_to_cpu(disk->blocknr),
225 (unsigned long long)dm_block_location(b));
229 if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) {
230 DMERR("sb_check failed: magic %llu: wanted %llu",
231 le64_to_cpu(disk->magic),
232 (unsigned long long) SUPERBLOCK_MAGIC);
236 csum_le = cpu_to_le32(dm_bm_checksum(&disk->flags,
237 sb_block_size - sizeof(__le32),
238 SUPERBLOCK_CSUM_XOR));
239 if (csum_le != disk->csum) {
240 DMERR("sb_check failed: csum %u: wanted %u",
241 le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
245 return check_metadata_version(disk);
248 static struct dm_block_validator sb_validator = {
249 .name = "superblock",
250 .prepare_for_write = sb_prepare_for_write,
254 /*----------------------------------------------------------------
255 * Low level metadata handling
256 *--------------------------------------------------------------*/
257 #define DM_ERA_METADATA_BLOCK_SIZE 4096
258 #define ERA_MAX_CONCURRENT_LOCKS 5
260 struct era_metadata {
261 struct block_device *bdev;
262 struct dm_block_manager *bm;
263 struct dm_space_map *sm;
264 struct dm_transaction_manager *tm;
266 dm_block_t block_size;
269 uint32_t current_era;
272 * We preallocate 2 writesets. When an era rolls over we
273 * switch between them. This means the allocation is done at
274 * preresume time, rather than on the io path.
276 struct writeset writesets[2];
277 struct writeset *current_writeset;
279 dm_block_t writeset_tree_root;
280 dm_block_t era_array_root;
282 struct dm_disk_bitset bitset_info;
283 struct dm_btree_info writeset_tree_info;
284 struct dm_array_info era_array_info;
286 dm_block_t metadata_snap;
289 * A flag that is set whenever a writeset has been archived.
291 bool archived_writesets;
294 * Reading the space map root can fail, so we read it into this
295 * buffer before the superblock is locked and updated.
297 __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
300 static int superblock_read_lock(struct era_metadata *md,
301 struct dm_block **sblock)
303 return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION,
304 &sb_validator, sblock);
307 static int superblock_lock_zero(struct era_metadata *md,
308 struct dm_block **sblock)
310 return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION,
311 &sb_validator, sblock);
314 static int superblock_lock(struct era_metadata *md,
315 struct dm_block **sblock)
317 return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION,
318 &sb_validator, sblock);
321 /* FIXME: duplication with cache and thin */
322 static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
327 __le64 *data_le, zero = cpu_to_le64(0);
328 unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
331 * We can't use a validator here - it may be all zeroes.
333 r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &b);
337 data_le = dm_block_data(b);
339 for (i = 0; i < sb_block_size; i++) {
340 if (data_le[i] != zero) {
351 /*----------------------------------------------------------------*/
353 static void ws_pack(const struct writeset_metadata *core, struct writeset_disk *disk)
355 disk->nr_bits = cpu_to_le32(core->nr_bits);
356 disk->root = cpu_to_le64(core->root);
359 static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata *core)
361 core->nr_bits = le32_to_cpu(disk->nr_bits);
362 core->root = le64_to_cpu(disk->root);
365 static void ws_inc(void *context, const void *value)
367 struct era_metadata *md = context;
368 struct writeset_disk ws_d;
371 memcpy(&ws_d, value, sizeof(ws_d));
372 b = le64_to_cpu(ws_d.root);
374 dm_tm_inc(md->tm, b);
377 static void ws_dec(void *context, const void *value)
379 struct era_metadata *md = context;
380 struct writeset_disk ws_d;
383 memcpy(&ws_d, value, sizeof(ws_d));
384 b = le64_to_cpu(ws_d.root);
386 dm_bitset_del(&md->bitset_info, b);
389 static int ws_eq(void *context, const void *value1, const void *value2)
391 return !memcmp(value1, value2, sizeof(struct writeset_disk));
394 /*----------------------------------------------------------------*/
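/*
 * ws_inc/ws_dec/ws_eq above are the btree value_type callbacks for the
 * writeset tree: they keep the reference on each archived bitset in step
 * with the tree nodes that point at it.  A sketch of the wiring (the actual
 * hook-up is done in setup_writeset_tree_info() below):
 *
 *   vt->context = md;
 *   vt->inc     = ws_inc;
 *   vt->dec     = ws_dec;
 *   vt->equal   = ws_eq;
 */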
396 static void setup_writeset_tree_info(struct era_metadata *md)
398 struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;
399 md->writeset_tree_info.tm = md->tm;
400 md->writeset_tree_info.levels = 1;
402 vt->size = sizeof(struct writeset_disk);
408 static void setup_era_array_info(struct era_metadata *md)
411 struct dm_btree_value_type vt;
413 vt.size = sizeof(__le32);
418 dm_array_info_init(&md->era_array_info, md->tm, &vt);
421 static void setup_infos(struct era_metadata *md)
423 dm_disk_bitset_init(md->tm, &md->bitset_info);
424 setup_writeset_tree_info(md);
425 setup_era_array_info(md);
428 /*----------------------------------------------------------------*/
430 static int create_fresh_metadata(struct era_metadata *md)
434 r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION,
437 DMERR("dm_tm_create_with_sm failed");
443 r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root);
445 DMERR("couldn't create new writeset tree");
449 r = dm_array_empty(&md->era_array_info, &md->era_array_root);
451 DMERR("couldn't create era array");
458 dm_sm_destroy(md->sm);
459 dm_tm_destroy(md->tm);
464 static int save_sm_root(struct era_metadata *md)
469 r = dm_sm_root_size(md->sm, &metadata_len);
473 return dm_sm_copy_root(md->sm, &md->metadata_space_map_root,
477 static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk)
479 memcpy(&disk->metadata_space_map_root,
480 &md->metadata_space_map_root,
481 sizeof(md->metadata_space_map_root));
485 * Writes a superblock, including the static fields that don't get updated
486 * with every commit (possible optimisation here). 'md' should be fully
487 * constructed when this is called.
489 static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk)
491 disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC);
492 disk->flags = cpu_to_le32(0ul);
494 /* FIXME: can't keep blanking the uuid (uuid is currently unused though) */
495 memset(disk->uuid, 0, sizeof(disk->uuid));
496 disk->version = cpu_to_le32(MAX_ERA_VERSION);
498 copy_sm_root(md, disk);
500 disk->data_block_size = cpu_to_le32(md->block_size);
501 disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
502 disk->nr_blocks = cpu_to_le32(md->nr_blocks);
503 disk->current_era = cpu_to_le32(md->current_era);
505 ws_pack(&md->current_writeset->md, &disk->current_writeset);
506 disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root);
507 disk->era_array_root = cpu_to_le64(md->era_array_root);
508 disk->metadata_snap = cpu_to_le64(md->metadata_snap);
511 static int write_superblock(struct era_metadata *md)
514 struct dm_block *sblock;
515 struct superblock_disk *disk;
517 r = save_sm_root(md);
519 DMERR("%s: save_sm_root failed", __func__);
523 r = superblock_lock_zero(md, &sblock);
527 disk = dm_block_data(sblock);
528 prepare_superblock(md, disk);
530 return dm_tm_commit(md->tm, sblock);
534 * Assumes block_size and the infos are set.
536 static int format_metadata(struct era_metadata *md)
540 r = create_fresh_metadata(md);
544 r = write_superblock(md);
546 dm_sm_destroy(md->sm);
547 dm_tm_destroy(md->tm);
554 static int open_metadata(struct era_metadata *md)
557 struct dm_block *sblock;
558 struct superblock_disk *disk;
560 r = superblock_read_lock(md, &sblock);
562 DMERR("couldn't read_lock superblock");
566 disk = dm_block_data(sblock);
568 /* Verify the data block size hasn't changed */
569 if (le32_to_cpu(disk->data_block_size) != md->block_size) {
570 DMERR("changing the data block size (from %u to %llu) is not supported",
571 le32_to_cpu(disk->data_block_size), md->block_size);
576 r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
577 disk->metadata_space_map_root,
578 sizeof(disk->metadata_space_map_root),
581 DMERR("dm_tm_open_with_sm failed");
587 md->nr_blocks = le32_to_cpu(disk->nr_blocks);
588 md->current_era = le32_to_cpu(disk->current_era);
590 ws_unpack(&disk->current_writeset, &md->current_writeset->md);
591 md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
592 md->era_array_root = le64_to_cpu(disk->era_array_root);
593 md->metadata_snap = le64_to_cpu(disk->metadata_snap);
594 md->archived_writesets = true;
596 dm_bm_unlock(sblock);
601 dm_bm_unlock(sblock);
605 static int open_or_format_metadata(struct era_metadata *md,
609 bool unformatted = false;
611 r = superblock_all_zeroes(md->bm, &unformatted);
616 return may_format ? format_metadata(md) : -EPERM;
618 return open_metadata(md);
621 static int create_persistent_data_objects(struct era_metadata *md,
626 md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
627 ERA_MAX_CONCURRENT_LOCKS);
628 if (IS_ERR(md->bm)) {
629 DMERR("could not create block manager");
630 return PTR_ERR(md->bm);
633 r = open_or_format_metadata(md, may_format);
635 dm_block_manager_destroy(md->bm);
640 static void destroy_persistent_data_objects(struct era_metadata *md)
642 dm_sm_destroy(md->sm);
643 dm_tm_destroy(md->tm);
644 dm_block_manager_destroy(md->bm);
648 * This waits until all era_map threads have picked up the new filter.
650 static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset)
	rcu_assign_pointer(md->current_writeset, new_writeset);
	synchronize_rcu();
656 /*----------------------------------------------------------------
657 * Writesets get 'digested' into the main era array.
659 * We're using a coroutine here so the worker thread can do the digestion,
660 * thus avoiding synchronisation of the metadata. Digesting a whole
661 * writeset in one go would cause too much latency.
662 *--------------------------------------------------------------*/
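/*
 * Illustrative flow of the digest 'coroutine' (each step does a bounded
 * amount of work and returns to the worker):
 *
 *   metadata_digest_start()
 *     -> metadata_digest_lookup_writeset()      pick the oldest archived era
 *     -> metadata_digest_transcribe_writeset()  copy up to INSERTS_PER_STEP
 *                                               marked blocks into the era
 *                                               array, repeating until done
 *     -> metadata_digest_remove_writeset()      delete it from the writeset
 *                                               tree, then back to lookup
 */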
665 unsigned nr_bits, current_bit;
666 struct writeset_metadata writeset;
668 struct dm_disk_bitset info;
670 int (*step)(struct era_metadata *, struct digest *);
673 static int metadata_digest_lookup_writeset(struct era_metadata *md,
676 static int metadata_digest_remove_writeset(struct era_metadata *md,
680 uint64_t key = d->era;
682 r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root,
683 &key, &md->writeset_tree_root);
685 DMERR("%s: dm_btree_remove failed", __func__);
689 d->step = metadata_digest_lookup_writeset;
693 #define INSERTS_PER_STEP 100
695 static int metadata_digest_transcribe_writeset(struct era_metadata *md,
700 unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
702 for (b = d->current_bit; b < e; b++) {
703 r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
705 DMERR("%s: writeset_marked_on_disk failed", __func__);
712 __dm_bless_for_disk(&d->value);
713 r = dm_array_set_value(&md->era_array_info, md->era_array_root,
714 b, &d->value, &md->era_array_root);
716 DMERR("%s: dm_array_set_value failed", __func__);
722 d->step = metadata_digest_remove_writeset;
729 static int metadata_digest_lookup_writeset(struct era_metadata *md,
734 struct writeset_disk disk;
736 r = dm_btree_find_lowest_key(&md->writeset_tree_info,
737 md->writeset_tree_root, &key);
743 r = dm_btree_lookup(&md->writeset_tree_info,
744 md->writeset_tree_root, &key, &disk);
751 DMERR("%s: dm_btree_lookup failed", __func__);
755 ws_unpack(&disk, &d->writeset);
756 d->value = cpu_to_le32(key);
759 * We initialise another bitset info to avoid any caching side effects
760 * with the previous one.
762 dm_disk_bitset_init(md->tm, &d->info);
764 d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
766 d->step = metadata_digest_transcribe_writeset;
771 static int metadata_digest_start(struct era_metadata *md, struct digest *d)
776 memset(d, 0, sizeof(*d));
777 d->step = metadata_digest_lookup_writeset;
782 /*----------------------------------------------------------------
783 * High level metadata interface. Target methods should use these, and not
784 * the lower level ones.
785 *--------------------------------------------------------------*/
786 static struct era_metadata *metadata_open(struct block_device *bdev,
791 struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL);
797 md->block_size = block_size;
799 md->writesets[0].md.root = INVALID_WRITESET_ROOT;
800 md->writesets[1].md.root = INVALID_WRITESET_ROOT;
801 md->current_writeset = &md->writesets[0];
803 r = create_persistent_data_objects(md, may_format);
812 static void metadata_close(struct era_metadata *md)
814 writeset_free(&md->writesets[0]);
815 writeset_free(&md->writesets[1]);
816 destroy_persistent_data_objects(md);
820 static bool valid_nr_blocks(dm_block_t n)
823 * dm_bitset restricts us to 2^32. test_bit & co. restrict us
824 * further to 2^31 - 1
826 return n < (1ull << 31);
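/*
 * Illustrative scale: even with the 2^31 - 1 block cap above, 4 MiB era
 * blocks still allow an origin of roughly 8 PiB, so the restriction is not
 * a practical limitation.
 */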
829 static int metadata_resize(struct era_metadata *md, void *arg)
832 dm_block_t *new_size = arg;
835 if (!valid_nr_blocks(*new_size)) {
836 DMERR("Invalid number of origin blocks %llu",
837 (unsigned long long) *new_size);
841 writeset_free(&md->writesets[0]);
842 writeset_free(&md->writesets[1]);
844 r = writeset_alloc(&md->writesets[0], *new_size);
846 DMERR("%s: writeset_alloc failed for writeset 0", __func__);
850 r = writeset_alloc(&md->writesets[1], *new_size);
852 DMERR("%s: writeset_alloc failed for writeset 1", __func__);
853 writeset_free(&md->writesets[0]);
857 value = cpu_to_le32(0u);
858 __dm_bless_for_disk(&value);
859 r = dm_array_resize(&md->era_array_info, md->era_array_root,
860 md->nr_blocks, *new_size,
861 &value, &md->era_array_root);
863 DMERR("%s: dm_array_resize failed", __func__);
864 writeset_free(&md->writesets[0]);
865 writeset_free(&md->writesets[1]);
869 md->nr_blocks = *new_size;
873 static int metadata_era_archive(struct era_metadata *md)
877 struct writeset_disk value;
879 r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
880 &md->current_writeset->md.root);
882 DMERR("%s: dm_bitset_flush failed", __func__);
886 ws_pack(&md->current_writeset->md, &value);
888 keys[0] = md->current_era;
889 __dm_bless_for_disk(&value);
890 r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root,
891 keys, &value, &md->writeset_tree_root);
893 DMERR("%s: couldn't insert writeset into btree", __func__);
894 /* FIXME: fail mode */
898 md->current_writeset->md.root = INVALID_WRITESET_ROOT;
899 md->archived_writesets = true;
904 static struct writeset *next_writeset(struct era_metadata *md)
906 return (md->current_writeset == &md->writesets[0]) ?
907 &md->writesets[1] : &md->writesets[0];
910 static int metadata_new_era(struct era_metadata *md)
913 struct writeset *new_writeset = next_writeset(md);
915 r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
917 DMERR("%s: writeset_init failed", __func__);
921 swap_writeset(md, new_writeset);
927 static int metadata_era_rollover(struct era_metadata *md)
931 if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
932 r = metadata_era_archive(md);
			DMERR("%s: metadata_era_archive failed", __func__);
935 /* FIXME: fail mode? */
940 r = metadata_new_era(md);
942 DMERR("%s: new era failed", __func__);
943 /* FIXME: fail mode */
950 static bool metadata_current_marked(struct era_metadata *md, dm_block_t block)
956 ws = rcu_dereference(md->current_writeset);
957 r = writeset_marked(ws, block);
963 static int metadata_commit(struct era_metadata *md)
966 struct dm_block *sblock;
968 if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
969 r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
970 &md->current_writeset->md.root);
972 DMERR("%s: bitset flush failed", __func__);
977 r = dm_tm_pre_commit(md->tm);
979 DMERR("%s: pre commit failed", __func__);
983 r = save_sm_root(md);
985 DMERR("%s: save_sm_root failed", __func__);
989 r = superblock_lock(md, &sblock);
991 DMERR("%s: superblock lock failed", __func__);
995 prepare_superblock(md, dm_block_data(sblock));
997 return dm_tm_commit(md->tm, sblock);
1000 static int metadata_checkpoint(struct era_metadata *md)
1003 * For now we just rollover, but later I want to put a check in to
1004 * avoid this if the filter is still pretty fresh.
1006 return metadata_era_rollover(md);
1010 * Metadata snapshots allow userland to access era data.
1012 static int metadata_take_snap(struct era_metadata *md)
1015 struct dm_block *clone;
1017 if (md->metadata_snap != SUPERBLOCK_LOCATION) {
1018 DMERR("%s: metadata snapshot already exists", __func__);
1022 r = metadata_era_rollover(md);
1024 DMERR("%s: era rollover failed", __func__);
1028 r = metadata_commit(md);
		DMERR("%s: metadata_commit failed", __func__);
1034 r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION);
1036 DMERR("%s: couldn't increment superblock", __func__);
1040 r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION,
1041 &sb_validator, &clone, &inc);
1043 DMERR("%s: couldn't shadow superblock", __func__);
1044 dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION);
1049 r = dm_sm_inc_block(md->sm, md->writeset_tree_root);
1051 DMERR("%s: couldn't inc writeset tree root", __func__);
1052 dm_tm_unlock(md->tm, clone);
1056 r = dm_sm_inc_block(md->sm, md->era_array_root);
1058 DMERR("%s: couldn't inc era tree root", __func__);
1059 dm_sm_dec_block(md->sm, md->writeset_tree_root);
1060 dm_tm_unlock(md->tm, clone);
1064 md->metadata_snap = dm_block_location(clone);
1066 dm_tm_unlock(md->tm, clone);
1071 static int metadata_drop_snap(struct era_metadata *md)
1074 dm_block_t location;
1075 struct dm_block *clone;
1076 struct superblock_disk *disk;
1078 if (md->metadata_snap == SUPERBLOCK_LOCATION) {
1079 DMERR("%s: no snap to drop", __func__);
1083 r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone);
1085 DMERR("%s: couldn't read lock superblock clone", __func__);
1090 * Whatever happens now we'll commit with no record of the metadata
1093 md->metadata_snap = SUPERBLOCK_LOCATION;
1095 disk = dm_block_data(clone);
1096 r = dm_btree_del(&md->writeset_tree_info,
1097 le64_to_cpu(disk->writeset_tree_root));
1099 DMERR("%s: error deleting writeset tree clone", __func__);
1100 dm_tm_unlock(md->tm, clone);
1104 r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root));
1106 DMERR("%s: error deleting era array clone", __func__);
1107 dm_tm_unlock(md->tm, clone);
1111 location = dm_block_location(clone);
1112 dm_tm_unlock(md->tm, clone);
1114 return dm_sm_dec_block(md->sm, location);
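/*
 * Illustrative use of the snapshot pair above via the messages handled in
 * era_message() below (the device name is hypothetical):
 *
 *   dmsetup message my_era 0 take_metadata_snap
 *   ... inspect the metadata from userland, e.g. with era_dump from
 *       thin-provisioning-tools ...
 *   dmsetup message my_era 0 drop_metadata_snap
 */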
1117 struct metadata_stats {
1124 static int metadata_get_stats(struct era_metadata *md, void *ptr)
1127 struct metadata_stats *s = ptr;
1128 dm_block_t nr_free, nr_total;
1130 r = dm_sm_get_nr_free(md->sm, &nr_free);
1132 DMERR("dm_sm_get_nr_free returned %d", r);
1136 r = dm_sm_get_nr_blocks(md->sm, &nr_total);
		DMERR("dm_sm_get_nr_blocks returned %d", r);
1142 s->used = nr_total - nr_free;
1143 s->total = nr_total;
1144 s->snap = md->metadata_snap;
1145 s->era = md->current_era;
1150 /*----------------------------------------------------------------*/
1153 struct dm_target *ti;
1154 struct dm_target_callbacks callbacks;
1156 struct dm_dev *metadata_dev;
1157 struct dm_dev *origin_dev;
1159 dm_block_t nr_blocks;
1160 uint32_t sectors_per_block;
1161 int sectors_per_block_shift;
1162 struct era_metadata *md;
1164 struct workqueue_struct *wq;
1165 struct work_struct worker;
1167 spinlock_t deferred_lock;
1168 struct bio_list deferred_bios;
1170 spinlock_t rpc_lock;
1171 struct list_head rpc_calls;
1173 struct digest digest;
1178 struct list_head list;
1180 int (*fn0)(struct era_metadata *);
1181 int (*fn1)(struct era_metadata *, void *);
1185 struct completion complete;
1188 /*----------------------------------------------------------------
1190 *---------------------------------------------------------------*/
1191 static bool block_size_is_power_of_two(struct era *era)
1193 return era->sectors_per_block_shift >= 0;
1196 static dm_block_t get_block(struct era *era, struct bio *bio)
1198 sector_t block_nr = bio->bi_iter.bi_sector;
1200 if (!block_size_is_power_of_two(era))
1201 (void) sector_div(block_nr, era->sectors_per_block);
	else
		block_nr >>= era->sectors_per_block_shift;
1208 static void remap_to_origin(struct era *era, struct bio *bio)
1210 bio_set_dev(bio, era->origin_dev->bdev);
1213 /*----------------------------------------------------------------
1215 *--------------------------------------------------------------*/
1216 static void wake_worker(struct era *era)
1218 if (!atomic_read(&era->suspended))
1219 queue_work(era->wq, &era->worker);
1222 static void process_old_eras(struct era *era)
1226 if (!era->digest.step)
1229 r = era->digest.step(era->md, &era->digest);
1231 DMERR("%s: digest step failed, stopping digestion", __func__);
1232 era->digest.step = NULL;
1234 } else if (era->digest.step)
1238 static void process_deferred_bios(struct era *era)
1241 struct bio_list deferred_bios, marked_bios;
1243 struct blk_plug plug;
1244 bool commit_needed = false;
1245 bool failed = false;
1246 struct writeset *ws = era->md->current_writeset;
1248 bio_list_init(&deferred_bios);
1249 bio_list_init(&marked_bios);
1251 spin_lock(&era->deferred_lock);
1252 bio_list_merge(&deferred_bios, &era->deferred_bios);
1253 bio_list_init(&era->deferred_bios);
1254 spin_unlock(&era->deferred_lock);
1256 if (bio_list_empty(&deferred_bios))
1259 while ((bio = bio_list_pop(&deferred_bios))) {
1260 r = writeset_test_and_set(&era->md->bitset_info, ws,
1261 get_block(era, bio));
1264 * This is bad news, we need to rollback.
1269 commit_needed = true;
1271 bio_list_add(&marked_bios, bio);
1274 if (commit_needed) {
1275 r = metadata_commit(era->md);
1281 while ((bio = bio_list_pop(&marked_bios)))
1284 blk_start_plug(&plug);
1285 while ((bio = bio_list_pop(&marked_bios))) {
		 * Only update the in-core writeset if the on-disk one was
		 * updated too.
		 */
		if (!failed)
			set_bit(get_block(era, bio), ws->bits);
1292 generic_make_request(bio);
1294 blk_finish_plug(&plug);
1298 static void process_rpc_calls(struct era *era)
1301 bool need_commit = false;
1302 struct list_head calls;
1303 struct rpc *rpc, *tmp;
1305 INIT_LIST_HEAD(&calls);
1306 spin_lock(&era->rpc_lock);
1307 list_splice_init(&era->rpc_calls, &calls);
1308 spin_unlock(&era->rpc_lock);
1310 list_for_each_entry_safe(rpc, tmp, &calls, list) {
1311 rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
1316 r = metadata_commit(era->md);
1318 list_for_each_entry_safe(rpc, tmp, &calls, list)
1322 list_for_each_entry_safe(rpc, tmp, &calls, list)
1323 complete(&rpc->complete);
1326 static void kick_off_digest(struct era *era)
1328 if (era->md->archived_writesets) {
1329 era->md->archived_writesets = false;
1330 metadata_digest_start(era->md, &era->digest);
1334 static void do_work(struct work_struct *ws)
1336 struct era *era = container_of(ws, struct era, worker);
1338 kick_off_digest(era);
1339 process_old_eras(era);
1340 process_deferred_bios(era);
1341 process_rpc_calls(era);
1344 static void defer_bio(struct era *era, struct bio *bio)
1346 spin_lock(&era->deferred_lock);
1347 bio_list_add(&era->deferred_bios, bio);
1348 spin_unlock(&era->deferred_lock);
1354 * Make an rpc call to the worker to change the metadata.
1356 static int perform_rpc(struct era *era, struct rpc *rpc)
1359 init_completion(&rpc->complete);
1361 spin_lock(&era->rpc_lock);
1362 list_add(&rpc->list, &era->rpc_calls);
1363 spin_unlock(&era->rpc_lock);
1366 wait_for_completion(&rpc->complete);
1371 static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
1377 return perform_rpc(era, &rpc);
1380 static int in_worker1(struct era *era,
1381 int (*fn)(struct era_metadata *, void *), void *arg)
1388 return perform_rpc(era, &rpc);
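/*
 * Sketch of how the rpc helpers are used by the target methods below
 * (illustrative):
 *
 *   r = in_worker0(era, metadata_era_rollover);       no argument
 *   r = in_worker1(era, metadata_get_stats, &stats);  one void * argument
 *
 * The call is queued on era->rpc_calls, the worker runs it against era->md
 * and commits, and the caller sleeps on rpc->complete until then.
 */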
1391 static void start_worker(struct era *era)
1393 atomic_set(&era->suspended, 0);
1396 static void stop_worker(struct era *era)
1398 atomic_set(&era->suspended, 1);
1399 drain_workqueue(era->wq);
1402 /*----------------------------------------------------------------
1404 *--------------------------------------------------------------*/
1405 static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
1407 struct request_queue *q = bdev_get_queue(dev->bdev);
1408 return bdi_congested(q->backing_dev_info, bdi_bits);
1411 static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1413 struct era *era = container_of(cb, struct era, callbacks);
1414 return dev_is_congested(era->origin_dev, bdi_bits);
1417 static void era_destroy(struct era *era)
1420 metadata_close(era->md);
1423 destroy_workqueue(era->wq);
1425 if (era->origin_dev)
1426 dm_put_device(era->ti, era->origin_dev);
1428 if (era->metadata_dev)
1429 dm_put_device(era->ti, era->metadata_dev);
1434 static dm_block_t calc_nr_blocks(struct era *era)
1436 return dm_sector_div_up(era->ti->len, era->sectors_per_block);
1439 static bool valid_block_size(dm_block_t block_size)
1441 bool greater_than_zero = block_size > 0;
1442 bool multiple_of_min_block_size = (block_size & (MIN_BLOCK_SIZE - 1)) == 0;
1444 return greater_than_zero && multiple_of_min_block_size;
1448 * <metadata dev> <data dev> <data block size (sectors)>
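 *
 * For example (illustrative values and device names only):
 *
 *   dmsetup create my_era --table "0 2097152 era /dev/mapper/era-meta /dev/mapper/era-data 8192"
 *
 * gives a 1 GiB era target with 4 MiB (8192 sector) blocks.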
1450 static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
1455 struct era_metadata *md;
1458 ti->error = "Invalid argument count";
1462 era = kzalloc(sizeof(*era), GFP_KERNEL);
1464 ti->error = "Error allocating era structure";
1470 r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
1472 ti->error = "Error opening metadata device";
1477 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
1479 ti->error = "Error opening data device";
1484 r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
1486 ti->error = "Error parsing block size";
1491 r = dm_set_target_max_io_len(ti, era->sectors_per_block);
1493 ti->error = "could not set max io len";
1498 if (!valid_block_size(era->sectors_per_block)) {
1499 ti->error = "Invalid block size";
1503 if (era->sectors_per_block & (era->sectors_per_block - 1))
1504 era->sectors_per_block_shift = -1;
1506 era->sectors_per_block_shift = __ffs(era->sectors_per_block);
1508 md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
1510 ti->error = "Error reading metadata";
1516 era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1518 ti->error = "could not create workqueue for metadata object";
1522 INIT_WORK(&era->worker, do_work);
1524 spin_lock_init(&era->deferred_lock);
1525 bio_list_init(&era->deferred_bios);
1527 spin_lock_init(&era->rpc_lock);
1528 INIT_LIST_HEAD(&era->rpc_calls);
1531 ti->num_flush_bios = 1;
1532 ti->flush_supported = true;
1534 ti->num_discard_bios = 1;
1535 era->callbacks.congested_fn = era_is_congested;
1536 dm_table_add_target_callbacks(ti->table, &era->callbacks);
1541 static void era_dtr(struct dm_target *ti)
1543 era_destroy(ti->private);
1546 static int era_map(struct dm_target *ti, struct bio *bio)
1548 struct era *era = ti->private;
1549 dm_block_t block = get_block(era, bio);
	 * All bios get remapped to the origin device. We do this now, though
	 * the bio may not be issued until later, depending on whether the
	 * block is marked in this era.
1556 remap_to_origin(era, bio);
1559 * REQ_PREFLUSH bios carry no data, so we're not interested in them.
1561 if (!(bio->bi_opf & REQ_PREFLUSH) &&
1562 (bio_data_dir(bio) == WRITE) &&
1563 !metadata_current_marked(era->md, block)) {
1564 defer_bio(era, bio);
1565 return DM_MAPIO_SUBMITTED;
1568 return DM_MAPIO_REMAPPED;
1571 static void era_postsuspend(struct dm_target *ti)
1574 struct era *era = ti->private;
1576 r = in_worker0(era, metadata_era_archive);
1578 DMERR("%s: couldn't archive current era", __func__);
1579 /* FIXME: fail mode */
1584 r = metadata_commit(era->md);
1586 DMERR("%s: metadata_commit failed", __func__);
1587 /* FIXME: fail mode */
1591 static int era_preresume(struct dm_target *ti)
1594 struct era *era = ti->private;
1595 dm_block_t new_size = calc_nr_blocks(era);
1597 if (era->nr_blocks != new_size) {
1598 r = metadata_resize(era->md, &new_size);
1600 DMERR("%s: metadata_resize failed", __func__);
1604 r = metadata_commit(era->md);
1606 DMERR("%s: metadata_commit failed", __func__);
1610 era->nr_blocks = new_size;
1615 r = in_worker0(era, metadata_era_rollover);
1617 DMERR("%s: metadata_era_rollover failed", __func__);
1627 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
1628 * <current era> <held metadata root | '-'>
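 *
 * e.g. "8 122/4096 42 -" (illustrative values) for a target in era 42 with
 * no held metadata snapshot.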
1630 static void era_status(struct dm_target *ti, status_type_t type,
1631 unsigned status_flags, char *result, unsigned maxlen)
1634 struct era *era = ti->private;
1636 struct metadata_stats stats;
1637 char buf[BDEVNAME_SIZE];
1640 case STATUSTYPE_INFO:
1641 r = in_worker1(era, metadata_get_stats, &stats);
1645 DMEMIT("%u %llu/%llu %u",
1646 (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
1647 (unsigned long long) stats.used,
1648 (unsigned long long) stats.total,
1649 (unsigned) stats.era);
1651 if (stats.snap != SUPERBLOCK_LOCATION)
1652 DMEMIT(" %llu", stats.snap);
1657 case STATUSTYPE_TABLE:
1658 format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
1660 format_dev_t(buf, era->origin_dev->bdev->bd_dev);
1661 DMEMIT("%s %u", buf, era->sectors_per_block);
1671 static int era_message(struct dm_target *ti, unsigned argc, char **argv,
1672 char *result, unsigned maxlen)
1674 struct era *era = ti->private;
1677 DMERR("incorrect number of message arguments");
1681 if (!strcasecmp(argv[0], "checkpoint"))
1682 return in_worker0(era, metadata_checkpoint);
1684 if (!strcasecmp(argv[0], "take_metadata_snap"))
1685 return in_worker0(era, metadata_take_snap);
1687 if (!strcasecmp(argv[0], "drop_metadata_snap"))
1688 return in_worker0(era, metadata_drop_snap);
1690 DMERR("unsupported message '%s'", argv[0]);
1694 static sector_t get_dev_size(struct dm_dev *dev)
1696 return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
1699 static int era_iterate_devices(struct dm_target *ti,
1700 iterate_devices_callout_fn fn, void *data)
1702 struct era *era = ti->private;
1703 return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
1706 static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
1708 struct era *era = ti->private;
1709 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
1712 * If the system-determined stacked limits are compatible with the
1713 * era device's blocksize (io_opt is a factor) do not override them.
1715 if (io_opt_sectors < era->sectors_per_block ||
1716 do_div(io_opt_sectors, era->sectors_per_block)) {
1717 blk_limits_io_min(limits, 0);
1718 blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
1722 /*----------------------------------------------------------------*/
1724 static struct target_type era_target = {
1726 .version = {1, 0, 0},
1727 .module = THIS_MODULE,
1731 .postsuspend = era_postsuspend,
1732 .preresume = era_preresume,
1733 .status = era_status,
1734 .message = era_message,
1735 .iterate_devices = era_iterate_devices,
1736 .io_hints = era_io_hints
1739 static int __init dm_era_init(void)
1743 r = dm_register_target(&era_target);
1745 DMERR("era target registration failed: %d", r);
1752 static void __exit dm_era_exit(void)
1754 dm_unregister_target(&era_target);
1757 module_init(dm_era_init);
1758 module_exit(dm_era_exit);
1760 MODULE_DESCRIPTION(DM_NAME " era target");
1761 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
1762 MODULE_LICENSE("GPL");