/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map.h"
#include "dm-space-map-common.h"
#include "dm-space-map-metadata.h"

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
#include <linux/kernel.h>

#define DM_MSG_PREFIX "space map metadata"

/*----------------------------------------------------------------*/

/*
 * An edge triggered threshold.
 */
struct threshold {
        bool threshold_set;
        bool value_set;
        dm_block_t threshold;
        dm_block_t current_value;
        dm_sm_threshold_fn fn;
        void *context;
};

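/*
 * The callback fires only when the watched value first drops to or
 * below the threshold.  value_set/current_value record the last value
 * seen, so repeated checks that stay below the threshold do not
 * re-trigger the callback.
 */
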
static void threshold_init(struct threshold *t)
{
        t->threshold_set = false;
        t->value_set = false;
}

static void set_threshold(struct threshold *t, dm_block_t value,
                          dm_sm_threshold_fn fn, void *context)
{
        t->threshold_set = true;
        t->threshold = value;
        t->fn = fn;
        t->context = context;
}

static bool below_threshold(struct threshold *t, dm_block_t value)
{
        return t->threshold_set && value <= t->threshold;
}

static bool threshold_already_triggered(struct threshold *t)
{
        return t->value_set && below_threshold(t, t->current_value);
}

static void check_threshold(struct threshold *t, dm_block_t value)
{
        if (below_threshold(t, value) &&
            !threshold_already_triggered(t))
                t->fn(t->context);

        t->value_set = true;
        t->current_value = value;
}

/*----------------------------------------------------------------*/

/*
 * Space map interface.
 *
 * The low level disk format is written using the standard btree and
 * transaction manager.  This means that performing disk operations may
 * cause us to recurse into the space map in order to allocate new blocks.
 * For this reason we have a pool of pre-allocated blocks large enough to
 * service any metadata_ll_disk operation.
 */

/*
 * FIXME: we should calculate this based on the size of the device.
 * Only the metadata space map needs this functionality.
 */
#define MAX_RECURSIVE_ALLOCATIONS 1024

enum block_op_type {
        BOP_INC,
        BOP_DEC
};

struct block_op {
        enum block_op_type type;
        dm_block_t block;
};

struct bop_ring_buffer {
        unsigned begin;
        unsigned end;
        struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
};

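/*
 * begin == end means the buffer is empty.  One slot is always left
 * unused so that a full buffer (brb_next(end) == begin) can be told
 * apart from an empty one; usable capacity is therefore
 * MAX_RECURSIVE_ALLOCATIONS.
 */
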
static void brb_init(struct bop_ring_buffer *brb)
{
        brb->begin = 0;
        brb->end = 0;
}

static bool brb_empty(struct bop_ring_buffer *brb)
{
        return brb->begin == brb->end;
}

static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
{
        unsigned r = old + 1;

        return r >= ARRAY_SIZE(brb->bops) ? 0 : r;
}

static int brb_push(struct bop_ring_buffer *brb,
                    enum block_op_type type, dm_block_t b)
{
        struct block_op *bop;
        unsigned next = brb_next(brb, brb->end);

        /*
         * We don't allow the last bop to be filled, this way we can
         * differentiate between full and empty.
         */
        if (next == brb->begin)
                return -ENOMEM;

        bop = brb->bops + brb->end;
        bop->type = type;
        bop->block = b;

        brb->end = next;

        return 0;
}

static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
{
        struct block_op *bop;

        if (brb_empty(brb))
                return -ENODATA;

        bop = brb->bops + brb->begin;
        result->type = bop->type;
        result->block = bop->block;

        return 0;
}

static int brb_pop(struct bop_ring_buffer *brb)
{
        if (brb_empty(brb))
                return -ENODATA;

        brb->begin = brb_next(brb, brb->begin);

        return 0;
}

/*----------------------------------------------------------------*/

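/*
 * ll is the space map state for the current transaction; old_ll is a
 * copy taken at the last commit.  Allocations must be free in both so
 * that blocks freed during this transaction are not reused before the
 * transaction commits.
 */
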
struct sm_metadata {
        struct dm_space_map sm;

        struct ll_disk ll;
        struct ll_disk old_ll;

        dm_block_t begin;

        unsigned recursion_count;
        unsigned allocated_this_transaction;
        struct bop_ring_buffer uncommitted;

        struct threshold threshold;
};

static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
        int r = brb_push(&smm->uncommitted, type, b);

        if (r) {
                DMERR("too many recursive allocations");
                return -ENOMEM;
        }

        return 0;
}

static int commit_bop(struct sm_metadata *smm, struct block_op *op)
{
        int r = 0;
        enum allocation_event ev;

        switch (op->type) {
        case BOP_INC:
                r = sm_ll_inc(&smm->ll, op->block, &ev);
                break;

        case BOP_DEC:
                r = sm_ll_dec(&smm->ll, op->block, &ev);
                break;
        }

        return r;
}

static void in(struct sm_metadata *smm)
{
        smm->recursion_count++;
}

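/*
 * in()/out() bracket operations that may recurse into the space map.
 * While recursion_count is non-zero, ref count changes are queued on
 * the uncommitted ring buffer instead of being applied directly; the
 * outermost out() (depth 1) replays the queue via apply_bops().
 */
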
static int apply_bops(struct sm_metadata *smm)
{
        int r = 0;

        while (!brb_empty(&smm->uncommitted)) {
                struct block_op bop;

                r = brb_peek(&smm->uncommitted, &bop);
                if (r) {
                        DMERR("bug in bop ring buffer");
                        break;
                }

                r = commit_bop(smm, &bop);
                if (r)
                        break;

                brb_pop(&smm->uncommitted);
        }

        return r;
}

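/*
 * Note the peek-then-pop above: an op is only removed from the buffer
 * once commit_bop() has succeeded, so a failed commit leaves it queued.
 */
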
static int out(struct sm_metadata *smm)
{
        int r = 0;

        /*
         * If we're not recursing then very bad things are happening.
         */
        if (!smm->recursion_count) {
                DMERR("lost track of recursion depth");
                return -ENOMEM;
        }

        if (smm->recursion_count == 1)
                r = apply_bops(smm);

        smm->recursion_count--;

        return r;
}

/*
 * When using the out() function above, we often want to combine an error
 * code for the operation run in the recursive context with that of the
 * out() call itself.  The first error reported wins.
 */
static int combine_errors(int r1, int r2)
{
        return r1 ? r1 : r2;
}

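/*
 * The non-recursive paths below all follow the same pattern:
 *
 *	in(smm);
 *	r = sm_ll_inc(&smm->ll, b, &ev);
 *	r2 = out(smm);
 *	return combine_errors(r, r2);
 */
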
static int recursing(struct sm_metadata *smm)
{
        return smm->recursion_count;
}

static void sm_metadata_destroy(struct dm_space_map *sm)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        kfree(smm);
}

static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->ll.nr_blocks;

        return 0;
}

static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->old_ll.nr_blocks - smm->old_ll.nr_allocated -
                 smm->allocated_this_transaction;

        return 0;
}

static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
                                 uint32_t *result)
{
        int r;
        unsigned i;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        unsigned adjustment = 0;

        /*
         * We may have some uncommitted adjustments to add.  This list
         * should always be really short.
         */
        for (i = smm->uncommitted.begin;
             i != smm->uncommitted.end;
             i = brb_next(&smm->uncommitted, i)) {
                struct block_op *op = smm->uncommitted.bops + i;

                if (op->block != b)
                        continue;

                switch (op->type) {
                case BOP_INC:
                        adjustment++;
                        break;

                case BOP_DEC:
                        adjustment--;
                        break;
                }
        }

        r = sm_ll_lookup(&smm->ll, b, result);
        if (r)
                return r;

        *result += adjustment;

        return 0;
}

static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
                                              dm_block_t b, int *result)
{
        int r, adjustment = 0;
        unsigned i;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        uint32_t rc;

        /*
         * We may have some uncommitted adjustments to add.  This list
         * should always be really short.
         */
        for (i = smm->uncommitted.begin;
             i != smm->uncommitted.end;
             i = brb_next(&smm->uncommitted, i)) {
                struct block_op *op = smm->uncommitted.bops + i;

                if (op->block != b)
                        continue;

                switch (op->type) {
                case BOP_INC:
                        adjustment++;
                        break;

                case BOP_DEC:
                        adjustment--;
                        break;
                }
        }

        if (adjustment > 1) {
                *result = 1;
                return 0;
        }

        r = sm_ll_lookup_bitmap(&smm->ll, b, &rc);
        if (r)
                return r;

        if (rc == 3)
                /*
                 * A bitmap entry of 3 means the true count lives in the
                 * ref count btree.  We err on the side of caution, and
                 * always return true.
                 */
                *result = 1;
        else
                *result = rc + adjustment > 1;

        return 0;
}

static int sm_metadata_set_count(struct dm_space_map *sm, dm_block_t b,
                                 uint32_t count)
{
        int r, r2;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        if (smm->recursion_count) {
                DMERR("cannot recurse set_count()");
                return -EINVAL;
        }

        in(smm);
        r = sm_ll_insert(&smm->ll, b, count, &ev);
        r2 = out(smm);

        return combine_errors(r, r2);
}

static int sm_metadata_inc_block(struct dm_space_map *sm, dm_block_t b)
{
        int r, r2 = 0;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        if (recursing(smm))
                r = add_bop(smm, BOP_INC, b);
        else {
                in(smm);
                r = sm_ll_inc(&smm->ll, b, &ev);
                r2 = out(smm);
        }

        return combine_errors(r, r2);
}

static int sm_metadata_dec_block(struct dm_space_map *sm, dm_block_t b)
{
        int r, r2 = 0;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        if (recursing(smm))
                r = add_bop(smm, BOP_DEC, b);
        else {
                in(smm);
                r = sm_ll_dec(&smm->ll, b, &ev);
                r2 = out(smm);
        }

        return combine_errors(r, r2);
}

static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
{
        int r, r2 = 0;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        /*
         * Any block we allocate has to be free in both the old and current ll.
         */
        r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
        if (r == -ENOSPC) {
                /*
                 * There's no free block between smm->begin and the end of the metadata device.
                 * We search before smm->begin in case something has been freed.
                 */
                r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, 0, smm->begin, b);
        }

        if (r)
                return r;

        smm->begin = *b + 1;

        if (recursing(smm))
                r = add_bop(smm, BOP_INC, *b);
        else {
                in(smm);
                r = sm_ll_inc(&smm->ll, *b, &ev);
                r2 = out(smm);
        }

        if (!r)
                smm->allocated_this_transaction++;

        return combine_errors(r, r2);
}

static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
        dm_block_t count;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        int r = sm_metadata_new_block_(sm, b);
        if (r) {
                DMERR_LIMIT("unable to allocate new metadata block");
                return r;
        }

        r = sm_metadata_get_nr_free(sm, &count);
        if (r) {
                DMERR_LIMIT("couldn't get free block count");
                return r;
        }

        check_threshold(&smm->threshold, count);

        return r;
}

static int sm_metadata_commit(struct dm_space_map *sm)
{
        int r;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        r = sm_ll_commit(&smm->ll);
        if (r)
                return r;

        memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
        smm->allocated_this_transaction = 0;

        return 0;
}

static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
                                                   dm_block_t threshold,
                                                   dm_sm_threshold_fn fn,
                                                   void *context)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        set_threshold(&smm->threshold, threshold, fn, context);

        return 0;
}

static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
{
        *result = sizeof(struct disk_sm_root);

        return 0;
}

static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        struct disk_sm_root root_le;

        root_le.nr_blocks = cpu_to_le64(smm->ll.nr_blocks);
        root_le.nr_allocated = cpu_to_le64(smm->ll.nr_allocated);
        root_le.bitmap_root = cpu_to_le64(smm->ll.bitmap_root);
        root_le.ref_count_root = cpu_to_le64(smm->ll.ref_count_root);

        if (max < sizeof(root_le))
                return -ENOSPC;

        memcpy(where_le, &root_le, sizeof(root_le));

        return 0;
}

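/*
 * Note that copy_root hands the root to the caller in little-endian
 * form, so the on-disk format is independent of host byte order.
 */
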
static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);

static const struct dm_space_map ops = {
        .destroy = sm_metadata_destroy,
        .extend = sm_metadata_extend,
        .get_nr_blocks = sm_metadata_get_nr_blocks,
        .get_nr_free = sm_metadata_get_nr_free,
        .get_count = sm_metadata_get_count,
        .count_is_more_than_one = sm_metadata_count_is_more_than_one,
        .set_count = sm_metadata_set_count,
        .inc_block = sm_metadata_inc_block,
        .dec_block = sm_metadata_dec_block,
        .new_block = sm_metadata_new_block,
        .commit = sm_metadata_commit,
        .root_size = sm_metadata_root_size,
        .copy_root = sm_metadata_copy_root,
        .register_threshold_callback = sm_metadata_register_threshold_callback
};

/*----------------------------------------------------------------*/

/*
 * When a new space map is created that manages its own space, we use
 * this tiny bootstrap allocator.
 */
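/*
 * Chicken-and-egg: the space map's own btrees need blocks, but block
 * allocation is what the space map provides.  The bootstrap ops hand
 * out blocks linearly from smm->begin and queue the ref count updates,
 * which are replayed once the real ops are swapped back in.
 */
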
static void sm_bootstrap_destroy(struct dm_space_map *sm)
{
}

static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
        DMERR("bootstrap doesn't support extend");

        return -EINVAL;
}

static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->ll.nr_blocks;

        return 0;
}

static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->ll.nr_blocks - smm->begin;

        return 0;
}

static int sm_bootstrap_get_count(struct dm_space_map *sm, dm_block_t b,
                                  uint32_t *result)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *result = (b < smm->begin) ? 1 : 0;

        return 0;
}

static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
                                               dm_block_t b, int *result)
{
        *result = 0;

        return 0;
}

static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
                                  uint32_t count)
{
        DMERR("bootstrap doesn't support set_count");

        return -EINVAL;
}

static int sm_bootstrap_new_block(struct dm_space_map *sm, dm_block_t *b)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        /*
         * We know the entire device is unused.
         */
        if (smm->begin == smm->ll.nr_blocks)
                return -ENOSPC;

        *b = smm->begin++;

        return 0;
}

static int sm_bootstrap_inc_block(struct dm_space_map *sm, dm_block_t b)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        return add_bop(smm, BOP_INC, b);
}

static int sm_bootstrap_dec_block(struct dm_space_map *sm, dm_block_t b)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        return add_bop(smm, BOP_DEC, b);
}

static int sm_bootstrap_commit(struct dm_space_map *sm)
{
        return 0;
}

static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
{
        DMERR("bootstrap doesn't support root_size");

        return -EINVAL;
}

static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
                                  size_t max)
{
        DMERR("bootstrap doesn't support copy_root");

        return -EINVAL;
}

static const struct dm_space_map bootstrap_ops = {
        .destroy = sm_bootstrap_destroy,
        .extend = sm_bootstrap_extend,
        .get_nr_blocks = sm_bootstrap_get_nr_blocks,
        .get_nr_free = sm_bootstrap_get_nr_free,
        .get_count = sm_bootstrap_get_count,
        .count_is_more_than_one = sm_bootstrap_count_is_more_than_one,
        .set_count = sm_bootstrap_set_count,
        .inc_block = sm_bootstrap_inc_block,
        .dec_block = sm_bootstrap_dec_block,
        .new_block = sm_bootstrap_new_block,
        .commit = sm_bootstrap_commit,
        .root_size = sm_bootstrap_root_size,
        .copy_root = sm_bootstrap_copy_root,
        .register_threshold_callback = NULL
};

/*----------------------------------------------------------------*/

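/*
 * Extending the space map itself allocates metadata blocks, so extend
 * temporarily swaps in the bootstrap ops: new blocks are carved
 * linearly from the freshly added area, and the inc/commit loop below
 * runs until a commit allocates no further blocks.
 */
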
static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
        int r, i;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        dm_block_t old_len = smm->ll.nr_blocks;

        /*
         * Flick into a mode where all blocks get allocated in the new area.
         */
        smm->begin = old_len;
        memcpy(sm, &bootstrap_ops, sizeof(*sm));

        /*
         * Extend.
         */
        r = sm_ll_extend(&smm->ll, extra_blocks);
        if (r)
                goto out;

        /*
         * We repeatedly increment then commit until the commit doesn't
         * allocate any new blocks.
         */
        do {
                for (i = old_len; !r && i < smm->begin; i++)
                        r = add_bop(smm, BOP_INC, i);

                if (r)
                        goto out;

                old_len = smm->begin;

                r = apply_bops(smm);
                if (r) {
                        DMERR("%s: apply_bops failed", __func__);
                        goto out;
                }

                r = sm_ll_commit(&smm->ll);
                if (r)
                        goto out;

        } while (old_len != smm->begin);

out:
        /*
         * Switch back to normal behaviour.
         */
        memcpy(sm, &ops, sizeof(*sm));
        return r;
}

/*----------------------------------------------------------------*/

struct dm_space_map *dm_sm_metadata_init(void)
{
        struct sm_metadata *smm;

        smm = kmalloc(sizeof(*smm), GFP_KERNEL);
        if (!smm)
                return ERR_PTR(-ENOMEM);

        memcpy(&smm->sm, &ops, sizeof(smm->sm));

        return &smm->sm;
}

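/*
 * The space map returned above is not usable until
 * dm_sm_metadata_create() or dm_sm_metadata_open() has initialised
 * the rest of the sm_metadata fields.
 */
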
int dm_sm_metadata_create(struct dm_space_map *sm,
                          struct dm_transaction_manager *tm,
                          dm_block_t nr_blocks,
                          dm_block_t superblock)
{
        int r;
        dm_block_t i;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        smm->begin = superblock + 1;
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        brb_init(&smm->uncommitted);
        threshold_init(&smm->threshold);

        memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));

        r = sm_ll_new_metadata(&smm->ll, tm);
        if (r)
                return r;

        if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
                nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
        r = sm_ll_extend(&smm->ll, nr_blocks);
        if (r)
                return r;

        memcpy(&smm->sm, &ops, sizeof(smm->sm));

        /*
         * Now we need to update the newly created data structures with the
         * allocated blocks that they were built from.
         */
        for (i = superblock; !r && i < smm->begin; i++)
                r = add_bop(smm, BOP_INC, i);

        if (r)
                return r;

        r = apply_bops(smm);
        if (r) {
                DMERR("%s: apply_bops failed", __func__);
                return r;
        }

        return sm_metadata_commit(sm);
}

int dm_sm_metadata_open(struct dm_space_map *sm,
                        struct dm_transaction_manager *tm,
                        void *root_le, size_t len)
{
        int r;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        r = sm_ll_open_metadata(&smm->ll, tm, root_le, len);
        if (r)
                return r;

        smm->begin = 0;
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        brb_init(&smm->uncommitted);
        threshold_init(&smm->threshold);

        memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));

        return 0;
}