/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-space-map.h"
#include "dm-space-map-common.h"
#include "dm-space-map-metadata.h"

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map metadata"

/*----------------------------------------------------------------*/
/*
 * An edge triggered threshold.
 */
struct threshold {
	bool threshold_set;
	bool value_set;
	dm_block_t threshold;
	dm_block_t current_value;
	dm_sm_threshold_fn fn;
	void *context;
};
static void threshold_init(struct threshold *t)
{
	t->threshold_set = false;
	t->value_set = false;
}
static void set_threshold(struct threshold *t, dm_block_t value,
			  dm_sm_threshold_fn fn, void *context)
{
	t->threshold_set = true;
	t->threshold = value;
	t->fn = fn;
	t->context = context;
}
static bool below_threshold(struct threshold *t, dm_block_t value)
{
	return t->threshold_set && value <= t->threshold;
}
static bool threshold_already_triggered(struct threshold *t)
{
	return t->value_set && below_threshold(t, t->current_value);
}
static void check_threshold(struct threshold *t, dm_block_t value)
{
	if (below_threshold(t, value) &&
	    !threshold_already_triggered(t))
		t->fn(t->context);

	t->value_set = true;
	t->current_value = value;
}
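/*
 * Illustrative example (not part of the driver): with a threshold of
 * 100 registered, the callback fires only on a downward crossing:
 *
 *	check_threshold(&t, 150);	// above threshold: no callback
 *	check_threshold(&t, 90);	// edge crossed: fn(context) fires
 *	check_threshold(&t, 80);	// still below: already triggered
 *	check_threshold(&t, 200);	// back above: edge re-armed
 *	check_threshold(&t, 95);	// crossed again: fn(context) fires
 */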
/*----------------------------------------------------------------*/

/*
 * Space map interface.
 *
 * The low level disk format is written using the standard btree and
 * transaction manager.  This means that performing disk operations may
 * cause us to recurse into the space map in order to allocate new blocks.
 * For this reason we have a pool of pre-allocated blocks large enough to
 * service any metadata_ll_disk operation.
 */

/*
 * FIXME: we should calculate this based on the size of the device.
 * Only the metadata space map needs this functionality.
 */
#define MAX_RECURSIVE_ALLOCATIONS 1024
enum block_op_type {
	BOP_INC,
	BOP_DEC
};

struct block_op {
	enum block_op_type type;
	dm_block_t block;
};

struct bop_ring_buffer {
	unsigned begin;
	unsigned end;
	struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
};
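/*
 * One slot of bops[] is always left unused so that begin == end can
 * only mean "empty", never "full"; the buffer therefore holds at most
 * MAX_RECURSIVE_ALLOCATIONS pending ops.
 */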
static void brb_init(struct bop_ring_buffer *brb)
{
	brb->begin = 0;
	brb->end = 0;
}
static bool brb_empty(struct bop_ring_buffer *brb)
{
	return brb->begin == brb->end;
}
static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
{
	unsigned r = old + 1;

	return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
}
static int brb_push(struct bop_ring_buffer *brb,
		    enum block_op_type type, dm_block_t b)
{
	struct block_op *bop;
	unsigned next = brb_next(brb, brb->end);

	/*
	 * We don't allow the last bop to be filled, this way we can
	 * differentiate between full and empty.
	 */
	if (next == brb->begin)
		return -ENOMEM;

	bop = brb->bops + brb->end;
	bop->type = type;
	bop->block = b;

	brb->end = next;

	return 0;
}
static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
{
	struct block_op *bop;

	if (brb_empty(brb))
		return -ENODATA;

	bop = brb->bops + brb->begin;
	result->type = bop->type;
	result->block = bop->block;

	return 0;
}
static int brb_pop(struct bop_ring_buffer *brb)
{
	if (brb_empty(brb))
		return -ENODATA;

	brb->begin = brb_next(brb, brb->begin);

	return 0;
}
/*----------------------------------------------------------------*/

struct sm_metadata {
	struct dm_space_map sm;

	struct ll_disk ll;
	struct ll_disk old_ll;

	dm_block_t begin;

	unsigned recursion_count;
	unsigned allocated_this_transaction;
	struct bop_ring_buffer uncommitted;

	struct threshold threshold;
};
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
	int r = brb_push(&smm->uncommitted, type, b);

	if (r) {
		DMERR("too many recursive allocations");
		return -ENOMEM;
	}

	return 0;
}
static int commit_bop(struct sm_metadata *smm, struct block_op *op)
{
	int r = 0;
	enum allocation_event ev;

	switch (op->type) {
	case BOP_INC:
		r = sm_ll_inc(&smm->ll, op->block, &ev);
		break;

	case BOP_DEC:
		r = sm_ll_dec(&smm->ll, op->block, &ev);
		break;
	}

	return r;
}
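/*
 * in() and out() bracket any operation that may recurse into the space
 * map.  While recursion_count is non-zero, reference count changes are
 * queued in the uncommitted ring buffer rather than applied directly;
 * the outermost out() drains the queue via apply_bops().
 */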
static void in(struct sm_metadata *smm)
{
	smm->recursion_count++;
}
static int apply_bops(struct sm_metadata *smm)
{
	int r = 0;

	while (!brb_empty(&smm->uncommitted)) {
		struct block_op bop;

		r = brb_peek(&smm->uncommitted, &bop);
		if (r) {
			DMERR("bug in bop ring buffer");
			break;
		}

		r = commit_bop(smm, &bop);
		if (r)
			break;

		brb_pop(&smm->uncommitted);
	}

	return r;
}
static int out(struct sm_metadata *smm)
{
	int r = 0;

	/*
	 * If we're not recursing then very bad things are happening.
	 */
	if (!smm->recursion_count) {
		DMERR("lost track of recursion depth");
		return -ENOMEM;
	}

	if (smm->recursion_count == 1)
		r = apply_bops(smm);

	smm->recursion_count--;

	return r;
}
/*
 * When using the out() function above, we often want to combine an error
 * code for the operation run in the recursive context with that from
 * out() itself.
 */
static int combine_errors(int r1, int r2)
{
	return r1 ? r1 : r2;
}
static int recursing(struct sm_metadata *smm)
{
	return smm->recursion_count;
}
static void sm_metadata_destroy(struct dm_space_map *sm)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	kfree(smm);
}
static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks;

	return 0;
}
static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->old_ll.nr_blocks - smm->old_ll.nr_allocated -
		 smm->allocated_this_transaction;

	return 0;
}
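/*
 * Note the free count above is based on old_ll, the state as of the
 * last commit, minus blocks allocated this transaction.  Blocks freed
 * during the current transaction are deliberately not counted as free,
 * because they cannot be reused until after the next commit.
 */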
static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
				 uint32_t *result)
{
	int r;
	unsigned i;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	unsigned adjustment = 0;

	/*
	 * We may have some uncommitted adjustments to add.  This list
	 * should always be really short.
	 */
	for (i = smm->uncommitted.begin;
	     i != smm->uncommitted.end;
	     i = brb_next(&smm->uncommitted, i)) {
		struct block_op *op = smm->uncommitted.bops + i;

		if (op->block != b)
			continue;

		if (op->type == BOP_INC)
			adjustment++;
		else
			adjustment--;
	}

	r = sm_ll_lookup(&smm->ll, b, result);
	if (r)
		return r;

	*result += adjustment;

	return 0;
}
static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
					      dm_block_t b, int *result)
{
	int r, adjustment = 0;
	unsigned i;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	uint32_t rc;

	/*
	 * We may have some uncommitted adjustments to add.  This list
	 * should always be really short.
	 */
	for (i = smm->uncommitted.begin;
	     i != smm->uncommitted.end;
	     i = brb_next(&smm->uncommitted, i)) {
		struct block_op *op = smm->uncommitted.bops + i;

		if (op->block != b)
			continue;

		if (op->type == BOP_INC)
			adjustment++;
		else
			adjustment--;
	}

	if (adjustment > 1) {
		*result = 1;
		return 0;
	}

	r = sm_ll_lookup_bitmap(&smm->ll, b, &rc);
	if (r)
		return r;

	if (rc == 3)
		/*
		 * A bitmap entry of 3 means the count has overflowed into
		 * the ref count tree, so it is at least 3.  We err on the
		 * side of caution, and always return true.
		 */
		*result = 1;
	else
		*result = rc + adjustment > 1;

	return 0;
}
static int sm_metadata_set_count(struct dm_space_map *sm, dm_block_t b,
				 uint32_t count)
{
	int r, r2;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (smm->recursion_count) {
		DMERR("cannot recurse set_count()");
		return -EINVAL;
	}

	in(smm);
	r = sm_ll_insert(&smm->ll, b, count, &ev);
	r2 = out(smm);

	return combine_errors(r, r2);
}
static int sm_metadata_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	int r, r2 = 0;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (recursing(smm))
		r = add_bop(smm, BOP_INC, b);
	else {
		in(smm);
		r = sm_ll_inc(&smm->ll, b, &ev);
		r2 = out(smm);
	}

	return combine_errors(r, r2);
}
static int sm_metadata_dec_block(struct dm_space_map *sm, dm_block_t b)
{
	int r, r2 = 0;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (recursing(smm))
		r = add_bop(smm, BOP_DEC, b);
	else {
		in(smm);
		r = sm_ll_dec(&smm->ll, b, &ev);
		r2 = out(smm);
	}

	return combine_errors(r, r2);
}
static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
{
	int r, r2 = 0;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	/*
	 * Any block we allocate has to be free in both the old and current ll.
	 */
	r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
	if (r == -ENOSPC) {
		/*
		 * There's no free block between smm->begin and the end of the metadata device.
		 * We search before smm->begin in case something has been freed.
		 */
		r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, 0, smm->begin, b);
	}

	if (r)
		return r;

	smm->begin = *b + 1;

	if (recursing(smm))
		r = add_bop(smm, BOP_INC, *b);
	else {
		in(smm);
		r = sm_ll_inc(&smm->ll, *b, &ev);
		r2 = out(smm);
	}

	if (!r)
		smm->allocated_this_transaction++;

	return combine_errors(r, r2);
}
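/*
 * smm->begin acts as a rolling allocation cursor, so repeated
 * allocations don't rescan the front of the device; the second search
 * above wraps round to pick up anything freed below the cursor.
 */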
static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	dm_block_t count;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	int r = sm_metadata_new_block_(sm, b);
	if (r) {
		DMERR_LIMIT("unable to allocate new metadata block");
		return r;
	}

	r = sm_metadata_get_nr_free(sm, &count);
	if (r) {
		DMERR_LIMIT("couldn't get free block count");
		return r;
	}

	check_threshold(&smm->threshold, count);

	return r;
}
static int sm_metadata_commit(struct dm_space_map *sm)
{
	int r;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	r = sm_ll_commit(&smm->ll);
	if (r)
		return r;

	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
	smm->allocated_this_transaction = 0;

	return 0;
}
static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
						   dm_block_t threshold,
						   dm_sm_threshold_fn fn,
						   void *context)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	set_threshold(&smm->threshold, threshold, fn, context);

	return 0;
}
static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
{
	*result = sizeof(struct disk_sm_root);

	return 0;
}
static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	struct disk_sm_root root_le;

	root_le.nr_blocks = cpu_to_le64(smm->ll.nr_blocks);
	root_le.nr_allocated = cpu_to_le64(smm->ll.nr_allocated);
	root_le.bitmap_root = cpu_to_le64(smm->ll.bitmap_root);
	root_le.ref_count_root = cpu_to_le64(smm->ll.ref_count_root);

	if (max < sizeof(root_le))
		return -ENOSPC;

	memcpy(where_le, &root_le, sizeof(root_le));

	return 0;
}
static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);

static const struct dm_space_map ops = {
	.destroy = sm_metadata_destroy,
	.extend = sm_metadata_extend,
	.get_nr_blocks = sm_metadata_get_nr_blocks,
	.get_nr_free = sm_metadata_get_nr_free,
	.get_count = sm_metadata_get_count,
	.count_is_more_than_one = sm_metadata_count_is_more_than_one,
	.set_count = sm_metadata_set_count,
	.inc_block = sm_metadata_inc_block,
	.dec_block = sm_metadata_dec_block,
	.new_block = sm_metadata_new_block,
	.commit = sm_metadata_commit,
	.root_size = sm_metadata_root_size,
	.copy_root = sm_metadata_copy_root,
	.register_threshold_callback = sm_metadata_register_threshold_callback
};
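/*
 * Illustrative client usage (a sketch, not code from this driver),
 * assuming the inline dm_sm_* wrappers from dm-space-map.h:
 *
 *	struct dm_space_map *sm = dm_sm_metadata_init();
 *	dm_block_t b;
 *
 *	r = dm_sm_metadata_create(sm, tm, nr_blocks, superblock);
 *	r = dm_sm_new_block(sm, &b);	// allocate a metadata block
 *	r = dm_sm_inc_block(sm, b);	// take an extra reference
 *	r = dm_sm_dec_block(sm, b);	// drop it again
 *	r = dm_sm_commit(sm);		// make the changes durable
 */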
/*----------------------------------------------------------------*/

/*
 * When a new space map is created that manages its own space, we use
 * this tiny bootstrap allocator.
 */
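/*
 * The bootstrap allocator hands out blocks linearly: everything below
 * smm->begin is considered allocated (with a ref count of exactly 1)
 * and everything from smm->begin upwards is free.  Inc/dec ops are
 * queued in the ring buffer to be replayed once the real space map
 * exists.
 */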
static void sm_bootstrap_destroy(struct dm_space_map *sm)
{
}
static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	DMERR("bootstrap doesn't support extend");

	return -EINVAL;
}
static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks;

	return 0;
}
static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks - smm->begin;

	return 0;
}
static int sm_bootstrap_get_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t *result)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*result = (b < smm->begin) ? 1 : 0;

	return 0;
}
static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
					       dm_block_t b, int *result)
{
	*result = 0;

	return 0;
}
static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t count)
{
	DMERR("bootstrap doesn't support set_count");

	return -EINVAL;
}
static int sm_bootstrap_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	/*
	 * We know the entire device is unused.
	 */
	if (smm->begin == smm->ll.nr_blocks)
		return -ENOSPC;

	*b = smm->begin++;

	return 0;
}
static int sm_bootstrap_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	return add_bop(smm, BOP_INC, b);
}
static int sm_bootstrap_dec_block(struct dm_space_map *sm, dm_block_t b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	return add_bop(smm, BOP_DEC, b);
}
static int sm_bootstrap_commit(struct dm_space_map *sm)
{
	return 0;
}
static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
{
	DMERR("bootstrap doesn't support root_size");

	return -EINVAL;
}
static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
				  size_t max)
{
	DMERR("bootstrap doesn't support copy_root");

	return -EINVAL;
}
static const struct dm_space_map bootstrap_ops = {
	.destroy = sm_bootstrap_destroy,
	.extend = sm_bootstrap_extend,
	.get_nr_blocks = sm_bootstrap_get_nr_blocks,
	.get_nr_free = sm_bootstrap_get_nr_free,
	.get_count = sm_bootstrap_get_count,
	.count_is_more_than_one = sm_bootstrap_count_is_more_than_one,
	.set_count = sm_bootstrap_set_count,
	.inc_block = sm_bootstrap_inc_block,
	.dec_block = sm_bootstrap_dec_block,
	.new_block = sm_bootstrap_new_block,
	.commit = sm_bootstrap_commit,
	.root_size = sm_bootstrap_root_size,
	.copy_root = sm_bootstrap_copy_root,
	.register_threshold_callback = NULL
};
/*----------------------------------------------------------------*/

static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	int r;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	dm_block_t i, old_len = smm->ll.nr_blocks;

	/*
	 * Flick into a mode where all blocks get allocated in the new area.
	 */
	smm->begin = old_len;
	memcpy(sm, &bootstrap_ops, sizeof(*sm));

	/*
	 * Extend.
	 */
	r = sm_ll_extend(&smm->ll, extra_blocks);
	if (r)
		goto out;

	/*
	 * We repeatedly increment then commit until the commit doesn't
	 * allocate any new blocks.  (Incrementing the new blocks' counts
	 * can itself allocate metadata blocks via the bootstrap
	 * allocator, advancing smm->begin.)
	 */
	do {
		for (i = old_len; !r && i < smm->begin; i++)
			r = add_bop(smm, BOP_INC, i);

		if (r)
			goto out;

		old_len = smm->begin;

		r = apply_bops(smm);
		if (r) {
			DMERR("%s: apply_bops failed", __func__);
			goto out;
		}

		r = sm_ll_commit(&smm->ll);
		if (r)
			goto out;

	} while (old_len != smm->begin);

out:
	/*
	 * Switch back to normal behaviour.
	 */
	memcpy(sm, &ops, sizeof(*sm));
	return r;
}
/*----------------------------------------------------------------*/

struct dm_space_map *dm_sm_metadata_init(void)
{
	struct sm_metadata *smm;

	smm = kmalloc(sizeof(*smm), GFP_KERNEL);
	if (!smm)
		return ERR_PTR(-ENOMEM);

	memcpy(&smm->sm, &ops, sizeof(smm->sm));

	return &smm->sm;
}
int dm_sm_metadata_create(struct dm_space_map *sm,
			  struct dm_transaction_manager *tm,
			  dm_block_t nr_blocks,
			  dm_block_t superblock)
{
	int r;
	dm_block_t i;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	smm->begin = superblock + 1;
	smm->recursion_count = 0;
	smm->allocated_this_transaction = 0;
	brb_init(&smm->uncommitted);
	threshold_init(&smm->threshold);

	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));

	r = sm_ll_new_metadata(&smm->ll, tm);
	if (!r) {
		if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
			nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
		r = sm_ll_extend(&smm->ll, nr_blocks);
	}
	memcpy(&smm->sm, &ops, sizeof(smm->sm));
	if (r)
		return r;

	/*
	 * Now we need to update the newly created data structures with the
	 * allocated blocks that they were built from.
	 */
	for (i = superblock; !r && i < smm->begin; i++)
		r = add_bop(smm, BOP_INC, i);

	if (r)
		return r;

	r = apply_bops(smm);
	if (r) {
		DMERR("%s: apply_bops failed", __func__);
		return r;
	}

	return sm_metadata_commit(sm);
}
int dm_sm_metadata_open(struct dm_space_map *sm,
			struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	r = sm_ll_open_metadata(&smm->ll, tm, root_le, len);
	if (r)
		return r;

	smm->begin = 0;
	smm->recursion_count = 0;
	smm->allocated_this_transaction = 0;
	brb_init(&smm->uncommitted);
	threshold_init(&smm->threshold);

	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));

	return 0;
}