/*
 * Copyright (C) 2015 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"
#include "dm-cache-policy-internal.h"
#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#define DM_MSG_PREFIX "cache-policy-smq"
/*----------------------------------------------------------------*/

/*
 * Safe division functions that return zero on divide by zero.
 */
static unsigned int safe_div(unsigned int n, unsigned int d)
{
	return d ? n / d : 0u;
}

static unsigned int safe_mod(unsigned int n, unsigned int d)
{
	return d ? n % d : 0u;
}
/*----------------------------------------------------------------*/

struct entry {
	unsigned int hash_next:28;
	unsigned int prev:28;
	unsigned int next:28;
	unsigned int level:6;
	bool dirty:1;
	bool allocated:1;
	bool sentinel:1;
	bool pending_work:1;

	dm_oblock_t oblock;
};

/*----------------------------------------------------------------*/

#define INDEXER_NULL ((1u << 28u) - 1u)
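/*
 * Entries refer to one another by 28 bit index rather than pointer, which
 * keeps struct entry small.  INDEXER_NULL is the all-ones 28 bit pattern,
 * so an entry_space can address at most (1 << 28) - 1 entries.
 */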
/*
 * An entry_space manages a set of entries that we use for the queues.
 * The clean and dirty queues share entries, so this object is separate
 * from the queue itself.
 */
struct entry_space {
	struct entry *begin;
	struct entry *end;
};

static int space_init(struct entry_space *es, unsigned int nr_entries)
{
	if (!nr_entries) {
		es->begin = es->end = NULL;
		return 0;
	}

	es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry)));
	if (!es->begin)
		return -ENOMEM;

	es->end = es->begin + nr_entries;
	return 0;
}
static void space_exit(struct entry_space *es)
{
	vfree(es->begin);
}
static struct entry *__get_entry(struct entry_space *es, unsigned int block)
{
	struct entry *e;

	e = es->begin + block;
	BUG_ON(e >= es->end);

	return e;
}

static unsigned int to_index(struct entry_space *es, struct entry *e)
{
	BUG_ON(e < es->begin || e >= es->end);
	return e - es->begin;
}

static struct entry *to_entry(struct entry_space *es, unsigned int block)
{
	if (block == INDEXER_NULL)
		return NULL;

	return __get_entry(es, block);
}
/*----------------------------------------------------------------*/

struct ilist {
	unsigned int nr_elts;	/* excluding sentinel entries */
	unsigned int head, tail;
};

static void l_init(struct ilist *l)
{
	l->nr_elts = 0;
	l->head = l->tail = INDEXER_NULL;
}
static struct entry *l_head(struct entry_space *es, struct ilist *l)
{
	return to_entry(es, l->head);
}

static struct entry *l_tail(struct entry_space *es, struct ilist *l)
{
	return to_entry(es, l->tail);
}

static struct entry *l_next(struct entry_space *es, struct entry *e)
{
	return to_entry(es, e->next);
}

static struct entry *l_prev(struct entry_space *es, struct entry *e)
{
	return to_entry(es, e->prev);
}

static bool l_empty(struct ilist *l)
{
	return l->head == INDEXER_NULL;
}
static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *head = l_head(es, l);

	e->next = l->head;
	e->prev = INDEXER_NULL;

	if (head)
		head->prev = l->head = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}

static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *tail = l_tail(es, l);

	e->next = INDEXER_NULL;
	e->prev = l->tail;

	if (tail)
		tail->next = l->tail = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}
static void l_add_before(struct entry_space *es, struct ilist *l,
			 struct entry *old, struct entry *e)
{
	struct entry *prev = l_prev(es, old);

	if (!prev)
		l_add_head(es, l, e);

	else {
		e->prev = old->prev;
		e->next = to_index(es, old);
		prev->next = old->prev = to_index(es, e);

		if (!e->sentinel)
			l->nr_elts++;
	}
}
static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *prev = l_prev(es, e);
	struct entry *next = l_next(es, e);

	if (prev)
		prev->next = e->next;
	else
		l->head = e->next;

	if (next)
		next->prev = e->prev;
	else
		l->tail = e->prev;

	if (!e->sentinel)
		l->nr_elts--;
}
static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
{
	struct entry *e;

	for (e = l_head(es, l); e; e = l_next(es, e))
		if (!e->sentinel) {
			l_del(es, l, e);
			return e;
		}

	return NULL;
}

static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{
	struct entry *e;

	for (e = l_tail(es, l); e; e = l_prev(es, e))
		if (!e->sentinel) {
			l_del(es, l, e);
			return e;
		}

	return NULL;
}
/*----------------------------------------------------------------*/

/*
 * The stochastic-multi-queue is a set of lru lists stacked into levels.
 * Entries are moved up levels when they are used, which loosely orders the
 * most accessed entries in the top levels and least in the bottom.  This
 * structure is *much* better than a single lru list.
 */
#define MAX_LEVELS 64u

struct queue {
	struct entry_space *es;

	unsigned int nr_elts;
	unsigned int nr_levels;
	struct ilist qs[MAX_LEVELS];

	/*
	 * We maintain a count of the number of entries we would like in each
	 * level.
	 */
	unsigned int last_target_nr_elts;
	unsigned int nr_top_levels;
	unsigned int nr_in_top_levels;
	unsigned int target_count[MAX_LEVELS];
};
static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
{
	unsigned int i;

	q->es = es;
	q->nr_elts = 0;
	q->nr_levels = nr_levels;

	for (i = 0; i < q->nr_levels; i++) {
		l_init(q->qs + i);
		q->target_count[i] = 0u;
	}

	q->last_target_nr_elts = 0u;
	q->nr_top_levels = 0u;
	q->nr_in_top_levels = 0u;
}

static unsigned int q_size(struct queue *q)
{
	return q->nr_elts;
}
/*
 * Insert an entry to the back of the given level.
 */
static void q_push(struct queue *q, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_tail(q->es, q->qs + e->level, e);
}

static void q_push_front(struct queue *q, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_head(q->es, q->qs + e->level, e);
}

static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_before(q->es, q->qs + e->level, old, e);
}

static void q_del(struct queue *q, struct entry *e)
{
	l_del(q->es, q->qs + e->level, e);
	if (!e->sentinel)
		q->nr_elts--;
}
/*
 * Return the oldest entry of the lowest populated level.
 */
static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
{
	unsigned int level;
	struct entry *e;

	max_level = min(max_level, q->nr_levels);

	for (level = 0; level < max_level; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
			if (e->sentinel) {
				if (can_cross_sentinel)
					continue;
				else
					break;
			}

			return e;
		}

	return NULL;
}

static struct entry *q_pop(struct queue *q)
{
	struct entry *e = q_peek(q, q->nr_levels, true);

	if (e)
		q_del(q, e);

	return e;
}
/*
 * This function assumes there is a non-sentinel entry to pop.  It's only
 * used by redistribute, so we know this is true.  It also doesn't adjust
 * the q->nr_elts count.
 */
static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
{
	struct entry *e;

	for (; level < q->nr_levels; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
			if (!e->sentinel) {
				l_del(q->es, q->qs + e->level, e);
				return e;
			}

	return NULL;
}
static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
				    unsigned int lbegin, unsigned int lend)
{
	unsigned int level, nr_levels, entries_per_level, remainder;

	BUG_ON(lbegin > lend);
	BUG_ON(lend > q->nr_levels);
	nr_levels = lend - lbegin;
	entries_per_level = safe_div(nr_elts, nr_levels);
	remainder = safe_mod(nr_elts, nr_levels);

	for (level = lbegin; level < lend; level++)
		q->target_count[level] =
			(level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;
}
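/*
 * Example: q_set_targets_subrange_(q, 10, 0, 4) gives entries_per_level = 2
 * with remainder = 2, so levels 0 and 1 get a target of 3 entries and
 * levels 2 and 3 a target of 2.
 */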
/*
 * Typically we have fewer elements in the top few levels, which allows us
 * to adjust the promote threshold nicely.
 */
static void q_set_targets(struct queue *q)
{
	if (q->last_target_nr_elts == q->nr_elts)
		return;

	q->last_target_nr_elts = q->nr_elts;

	if (q->nr_top_levels > q->nr_levels)
		q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);

	else {
		q_set_targets_subrange_(q, q->nr_in_top_levels,
					q->nr_levels - q->nr_top_levels, q->nr_levels);

		if (q->nr_in_top_levels < q->nr_elts)
			q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
						0, q->nr_levels - q->nr_top_levels);
		else
			q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
	}
}
static void q_redistribute(struct queue *q)
{
	unsigned int target, level;
	struct ilist *l, *l_above;
	struct entry *e;

	q_set_targets(q);

	for (level = 0u; level < q->nr_levels - 1u; level++) {
		l = q->qs + level;
		target = q->target_count[level];

		/*
		 * Pull down some entries from the level above.
		 */
		while (l->nr_elts < target) {
			e = __redist_pop_from(q, level + 1u);
			if (!e) {
				/* bug in nr_elts */
				break;
			}

			e->level = level;
			l_add_tail(q->es, l, e);
		}

		/*
		 * Push some entries up.
		 */
		l_above = q->qs + level + 1u;
		while (l->nr_elts > target) {
			e = l_pop_tail(q->es, l);

			if (!e)
				/* bug in nr_elts */
				break;

			e->level = level + 1u;
			l_add_tail(q->es, l_above, e);
		}
	}
}
static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
		      struct entry *s1, struct entry *s2)
{
	struct entry *de;
	unsigned int sentinels_passed = 0;
	unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels);

	/* try and find an entry to swap with */
	if (extra_levels && (e->level < q->nr_levels - 1u)) {
		for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))
			sentinels_passed++;

		if (de) {
			q_del(q, de);
			de->level = e->level;
			if (s1) {
				switch (sentinels_passed) {
				case 0:
					q_push_before(q, s1, de);
					break;

				case 1:
					q_push_before(q, s2, de);
					break;

				default:
					q_push(q, de);
				}
			} else
				q_push(q, de);
		}
	}

	q_del(q, e);
	e->level = new_level;
	q_push(q, e);
}
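/*
 * Note that q_requeue() swaps rather than shuffles: the entry displaced
 * from new_level is pushed down to e's old level, and sentinels_passed is
 * used to reinsert it on the same side of the s1/s2 sentinels that it
 * occupied before, so per-period bookkeeping isn't disturbed.
 */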
/*----------------------------------------------------------------*/

#define FP_SHIFT 8
#define SIXTEENTH (1u << (FP_SHIFT - 4u))
#define EIGHTH (1u << (FP_SHIFT - 3u))

struct stats {
	unsigned int hit_threshold;
	unsigned int hits;
	unsigned int misses;
};

enum performance {
	Q_POOR,
	Q_FAIR,
	Q_WELL
};
static void stats_init(struct stats *s, unsigned int nr_levels)
{
	s->hit_threshold = (nr_levels * 3u) / 4u;
	s->hits = 0u;
	s->misses = 0u;
}

static void stats_reset(struct stats *s)
{
	s->hits = s->misses = 0u;
}

static void stats_level_accessed(struct stats *s, unsigned int level)
{
	if (level >= s->hit_threshold)
		s->hits++;
	else
		s->misses++;
}

static void stats_miss(struct stats *s)
{
	s->misses++;
}
/*
 * There are times when we don't have any confidence in the hotspot queue,
 * such as when a fresh cache is created and the blocks have been spread
 * out across the levels, or when an io load changes.  We detect this by
 * seeing how often a lookup is in the top levels of the hotspot queue.
 */
static enum performance stats_assess(struct stats *s)
{
	unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);

	if (confidence < SIXTEENTH)
		return Q_POOR;

	else if (confidence < EIGHTH)
		return Q_FAIR;

	else
		return Q_WELL;
}
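/*
 * Worked example: with FP_SHIFT = 8, 10 hits and 90 misses give
 * confidence = (10 << 8) / 100 = 25.  SIXTEENTH is 16 and EIGHTH is 32,
 * so this assesses as Q_FAIR.
 */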
/*----------------------------------------------------------------*/

struct smq_hash_table {
	struct entry_space *es;
	unsigned long long hash_bits;
	unsigned int *buckets;
};

/*
 * All cache entries are stored in a chained hash table.  To save space we
 * use indexing again, and only store indexes to the next entry.
 */
static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries)
{
	unsigned int i, nr_buckets;

	ht->es = es;
	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
	ht->hash_bits = __ffs(nr_buckets);

	ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
	if (!ht->buckets)
		return -ENOMEM;

	for (i = 0; i < nr_buckets; i++)
		ht->buckets[i] = INDEXER_NULL;

	return 0;
}
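/*
 * Sizing example: nr_entries = 1000 gives nr_buckets =
 * roundup_pow_of_two(max(250u, 16u)) = 256 and hash_bits = 8, i.e. an
 * average chain of roughly four entries per bucket.
 */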
static void h_exit(struct smq_hash_table *ht)
{
	vfree(ht->buckets);
}
static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket)
{
	return to_entry(ht->es, ht->buckets[bucket]);
}

static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
{
	return to_entry(ht->es, e->hash_next);
}

static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e)
{
	e->hash_next = ht->buckets[bucket];
	ht->buckets[bucket] = to_index(ht->es, e);
}

static void h_insert(struct smq_hash_table *ht, struct entry *e)
{
	unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);

	__h_insert(ht, h, e);
}
static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock,
				struct entry **prev)
{
	struct entry *e;

	*prev = NULL;
	for (e = h_head(ht, h); e; e = h_next(ht, e)) {
		if (e->oblock == oblock)
			return e;

		*prev = e;
	}

	return NULL;
}

static void __h_unlink(struct smq_hash_table *ht, unsigned int h,
		       struct entry *e, struct entry *prev)
{
	if (prev)
		prev->hash_next = e->hash_next;
	else
		ht->buckets[h] = e->hash_next;
}
/*
 * Also moves each entry to the front of the bucket.
 */
static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
{
	struct entry *e, *prev;
	unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits);

	e = __h_lookup(ht, h, oblock, &prev);
	if (e && prev) {
		/*
		 * Move to the front because this entry is likely
		 * to be hit again.
		 */
		__h_unlink(ht, h, e, prev);
		__h_insert(ht, h, e);
	}

	return e;
}
static void h_remove(struct smq_hash_table *ht, struct entry *e)
{
	unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
	struct entry *prev;

	/*
	 * The down side of using a singly linked list is we have to
	 * iterate the bucket to remove an item.
	 */
	e = __h_lookup(ht, h, e->oblock, &prev);
	if (e)
		__h_unlink(ht, h, e, prev);
}
/*----------------------------------------------------------------*/

struct entry_alloc {
	struct entry_space *es;
	unsigned int begin;

	unsigned int nr_allocated;
	struct ilist free;
};

static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
			   unsigned int begin, unsigned int end)
{
	unsigned int i;

	ea->es = es;
	ea->nr_allocated = 0u;
	ea->begin = begin;

	l_init(&ea->free);
	for (i = begin; i != end; i++)
		l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
}
static void init_entry(struct entry *e)
{
	/*
	 * We can't memset because that would clear the hotspot and
	 * sentinel bits, which remain constant.
	 */
	e->hash_next = INDEXER_NULL;
	e->next = INDEXER_NULL;
	e->prev = INDEXER_NULL;
	e->level = 0u;
	e->dirty = true;	/* FIXME: audit */
	e->allocated = true;
	e->sentinel = false;
	e->pending_work = false;
}
static struct entry *alloc_entry(struct entry_alloc *ea)
{
	struct entry *e;

	if (l_empty(&ea->free))
		return NULL;

	e = l_pop_head(ea->es, &ea->free);
	init_entry(e);
	ea->nr_allocated++;

	return e;
}
/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
{
	struct entry *e = __get_entry(ea->es, ea->begin + i);

	BUG_ON(e->allocated);

	l_del(ea->es, &ea->free, e);
	init_entry(e);
	ea->nr_allocated++;

	return e;
}
static void free_entry(struct entry_alloc *ea, struct entry *e)
{
	BUG_ON(!ea->nr_allocated);
	BUG_ON(!e->allocated);

	ea->nr_allocated--;
	e->allocated = false;
	l_add_tail(ea->es, &ea->free, e);
}
static bool allocator_empty(struct entry_alloc *ea)
{
	return l_empty(&ea->free);
}

static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
{
	return to_index(ea->es, e) - ea->begin;
}

static struct entry *get_entry(struct entry_alloc *ea, unsigned int index)
{
	return __get_entry(ea->es, ea->begin + index);
}
/*----------------------------------------------------------------*/

#define NR_HOTSPOT_LEVELS 64u
#define NR_CACHE_LEVELS 64u

#define WRITEBACK_PERIOD (10ul * HZ)
#define DEMOTE_PERIOD (60ul * HZ)

#define HOTSPOT_UPDATE_PERIOD (HZ)
#define CACHE_UPDATE_PERIOD (60ul * HZ)
struct smq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	spinlock_t lock;
	dm_cblock_t cache_size;
	sector_t cache_block_size;

	sector_t hotspot_block_size;
	unsigned int nr_hotspot_blocks;
	unsigned int cache_blocks_per_hotspot_block;
	unsigned int hotspot_level_jump;

	struct entry_space es;
	struct entry_alloc writeback_sentinel_alloc;
	struct entry_alloc demote_sentinel_alloc;
	struct entry_alloc hotspot_alloc;
	struct entry_alloc cache_alloc;

	unsigned long *hotspot_hit_bits;
	unsigned long *cache_hit_bits;

	/*
	 * We maintain three queues of entries.  The cache proper,
	 * consisting of a clean and dirty queue, containing the currently
	 * active mappings.  The hotspot queue uses a larger block size to
	 * track blocks that are being hit frequently and potential
	 * candidates for promotion to the cache.
	 */
	struct queue hotspot;
	struct queue clean;
	struct queue dirty;

	struct stats hotspot_stats;
	struct stats cache_stats;

	/*
	 * Keeps track of time, incremented by the core.  We use this to
	 * avoid attributing multiple hits within the same tick.
	 */
	unsigned int tick;

	/*
	 * The hash tables allow us to quickly find an entry by origin
	 * block.
	 */
	struct smq_hash_table table;
	struct smq_hash_table hotspot_table;

	bool current_writeback_sentinels;
	unsigned long next_writeback_period;

	bool current_demote_sentinels;
	unsigned long next_demote_period;

	unsigned int write_promote_level;
	unsigned int read_promote_level;

	unsigned long next_hotspot_period;
	unsigned long next_cache_period;

	struct background_tracker *bg_work;

	bool migrations_allowed;
};
/*----------------------------------------------------------------*/

static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which)
{
	return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
}
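/*
 * Each sentinel allocator holds two banks of NR_CACHE_LEVELS sentinels,
 * one sentinel per level per bank.  The 'which' flag selects the active
 * bank; update_sentinels() below flips it each period and requeues the
 * newly active bank at the back of every level.
 */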
static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
{
	return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
}

static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
{
	return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
}
static void __update_writeback_sentinels(struct smq_policy *mq)
{
	unsigned int level;
	struct queue *q = &mq->dirty;
	struct entry *sentinel;

	for (level = 0; level < q->nr_levels; level++) {
		sentinel = writeback_sentinel(mq, level);
		q_del(q, sentinel);
		q_push(q, sentinel);
	}
}

static void __update_demote_sentinels(struct smq_policy *mq)
{
	unsigned int level;
	struct queue *q = &mq->clean;
	struct entry *sentinel;

	for (level = 0; level < q->nr_levels; level++) {
		sentinel = demote_sentinel(mq, level);
		q_del(q, sentinel);
		q_push(q, sentinel);
	}
}
static void update_sentinels(struct smq_policy *mq)
{
	if (time_after(jiffies, mq->next_writeback_period)) {
		mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
		mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
		__update_writeback_sentinels(mq);
	}

	if (time_after(jiffies, mq->next_demote_period)) {
		mq->next_demote_period = jiffies + DEMOTE_PERIOD;
		mq->current_demote_sentinels = !mq->current_demote_sentinels;
		__update_demote_sentinels(mq);
	}
}
static void __sentinels_init(struct smq_policy *mq)
{
	unsigned int level;
	struct entry *sentinel;

	for (level = 0; level < NR_CACHE_LEVELS; level++) {
		sentinel = writeback_sentinel(mq, level);
		sentinel->level = level;
		q_push(&mq->dirty, sentinel);

		sentinel = demote_sentinel(mq, level);
		sentinel->level = level;
		q_push(&mq->clean, sentinel);
	}
}
static void sentinels_init(struct smq_policy *mq)
{
	mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
	mq->next_demote_period = jiffies + DEMOTE_PERIOD;

	mq->current_writeback_sentinels = false;
	mq->current_demote_sentinels = false;
	__sentinels_init(mq);

	mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
	mq->current_demote_sentinels = !mq->current_demote_sentinels;
	__sentinels_init(mq);
}
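/*
 * __sentinels_init() is deliberately run twice, once with each bank
 * selected, so both the active and inactive sentinels start out on the
 * queues; update_sentinels() only ever moves them after this.
 */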
/*----------------------------------------------------------------*/

static void del_queue(struct smq_policy *mq, struct entry *e)
{
	q_del(e->dirty ? &mq->dirty : &mq->clean, e);
}

static void push_queue(struct smq_policy *mq, struct entry *e)
{
	if (e->dirty)
		q_push(&mq->dirty, e);
	else
		q_push(&mq->clean, e);
}
// !h, !q, a -> h, q, a
static void push(struct smq_policy *mq, struct entry *e)
{
	h_insert(&mq->table, e);
	if (!e->pending_work)
		push_queue(mq, e);
}

static void push_queue_front(struct smq_policy *mq, struct entry *e)
{
	if (e->dirty)
		q_push_front(&mq->dirty, e);
	else
		q_push_front(&mq->clean, e);
}

static void push_front(struct smq_policy *mq, struct entry *e)
{
	h_insert(&mq->table, e);
	if (!e->pending_work)
		push_queue_front(mq, e);
}
static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
{
	return to_cblock(get_index(&mq->cache_alloc, e));
}
static void requeue(struct smq_policy *mq, struct entry *e)
{
	/*
	 * Pending work has temporarily been taken out of the queues.
	 */
	if (e->pending_work)
		return;

	if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
		if (!e->dirty) {
			q_requeue(&mq->clean, e, 1u, NULL, NULL);
			return;
		}

		q_requeue(&mq->dirty, e, 1u,
			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));
	}
}
static unsigned int default_promote_level(struct smq_policy *mq)
{
	/*
	 * The promote level depends on the current performance of the
	 * cache.
	 *
	 * If the cache is performing badly, then we can't afford
	 * to promote much without causing performance to drop below that
	 * of the origin device.
	 *
	 * If the cache is performing well, then we don't need to promote
	 * much.  If it isn't broken, don't fix it.
	 *
	 * If the cache is middling then we promote more.
	 *
	 * This scheme reminds me of a graph of entropy vs probability of a
	 * binary variable.
	 */
	static const unsigned int table[] = {
		1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
	};

	unsigned int hits = mq->cache_stats.hits;
	unsigned int misses = mq->cache_stats.misses;
	unsigned int index = safe_div(hits << 4u, hits + misses);
	return table[index];
}
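/*
 * Example: 50 hits and 50 misses index the table at (50 << 4) / 100 = 8,
 * promoting fairly eagerly (7).  Pure hit (index 16) and pure miss
 * (index 0) streams both sit at the cautious end of the table (1).
 */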
static void update_promote_levels(struct smq_policy *mq)
{
	/*
	 * If there are unused cache entries then we want to be really
	 * eager to promote.
	 */
	unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ?
		default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);

	threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);

	/*
	 * If the hotspot queue is performing badly then we have little
	 * confidence that we know which blocks to promote.  So we cut down
	 * the amount of promotions.
	 */
	switch (stats_assess(&mq->hotspot_stats)) {
	case Q_POOR:
		threshold_level /= 4u;
		break;

	case Q_FAIR:
		threshold_level /= 2u;
		break;

	case Q_WELL:
		break;
	}

	mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
	mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level);
}
/*
 * If the hotspot queue is performing badly, then we try and move entries
 * around more quickly.
 */
static void update_level_jump(struct smq_policy *mq)
{
	switch (stats_assess(&mq->hotspot_stats)) {
	case Q_POOR:
		mq->hotspot_level_jump = 4u;
		break;

	case Q_FAIR:
		mq->hotspot_level_jump = 2u;
		break;

	case Q_WELL:
		mq->hotspot_level_jump = 1u;
		break;
	}
}
static void end_hotspot_period(struct smq_policy *mq)
{
	clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
	update_promote_levels(mq);

	if (time_after(jiffies, mq->next_hotspot_period)) {
		update_level_jump(mq);
		q_redistribute(&mq->hotspot);
		stats_reset(&mq->hotspot_stats);
		mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
	}
}
static void end_cache_period(struct smq_policy *mq)
{
	if (time_after(jiffies, mq->next_cache_period)) {
		clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));

		q_redistribute(&mq->dirty);
		q_redistribute(&mq->clean);
		stats_reset(&mq->cache_stats);

		mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
	}
}
/*----------------------------------------------------------------*/

/*
 * Targets are given as a percentage.
 */
#define CLEAN_TARGET 25u
#define FREE_TARGET 25u

static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p)
{
	return from_cblock(mq->cache_size) * p / 100u;
}
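/*
 * Example: a 10000 block cache gives a FREE_TARGET of 2500 blocks;
 * free_target_met() below counts demotions already queued towards that
 * target.
 */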
static bool clean_target_met(struct smq_policy *mq, bool idle)
{
	/*
	 * Cache entries may not be populated.  So we cannot rely on the
	 * size of the clean queue.
	 */
	if (idle) {
		/*
		 * We'd like to clean everything.
		 */
		return q_size(&mq->dirty) == 0u;
	}

	/*
	 * If we're busy we don't worry about cleaning at all.
	 */
	return true;
}
static bool free_target_met(struct smq_policy *mq)
{
	unsigned int nr_free;

	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
		percent_to_target(mq, FREE_TARGET);
}
/*----------------------------------------------------------------*/

static void mark_pending(struct smq_policy *mq, struct entry *e)
{
	BUG_ON(e->sentinel);
	BUG_ON(!e->allocated);
	BUG_ON(e->pending_work);
	e->pending_work = true;
}

static void clear_pending(struct smq_policy *mq, struct entry *e)
{
	BUG_ON(!e->pending_work);
	e->pending_work = false;
}
static void queue_writeback(struct smq_policy *mq, bool idle)
{
	int r;
	struct policy_work work;
	struct entry *e;

	e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
	if (e) {
		mark_pending(mq, e);
		q_del(&mq->dirty, e);

		work.op = POLICY_WRITEBACK;
		work.oblock = e->oblock;
		work.cblock = infer_cblock(mq, e);

		r = btracker_queue(mq->bg_work, &work, NULL);
		if (r) {
			clear_pending(mq, e);
			q_push_front(&mq->dirty, e);
		}
	}
}
static void queue_demotion(struct smq_policy *mq)
{
	int r;
	struct policy_work work;
	struct entry *e;

	if (WARN_ON_ONCE(!mq->migrations_allowed))
		return;

	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
	if (!e) {
		if (!clean_target_met(mq, true))
			queue_writeback(mq, false);
		return;
	}

	mark_pending(mq, e);
	q_del(&mq->clean, e);

	work.op = POLICY_DEMOTE;
	work.oblock = e->oblock;
	work.cblock = infer_cblock(mq, e);
	r = btracker_queue(mq->bg_work, &work, NULL);
	if (r) {
		clear_pending(mq, e);
		q_push_front(&mq->clean, e);
	}
}
static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
			    struct policy_work **workp)
{
	int r;
	struct entry *e;
	struct policy_work work;

	if (!mq->migrations_allowed)
		return;

	if (allocator_empty(&mq->cache_alloc)) {
		/*
		 * We always claim to be 'idle' to ensure some demotions happen
		 * with continuous loads.
		 */
		if (!free_target_met(mq))
			queue_demotion(mq);
		return;
	}

	if (btracker_promotion_already_present(mq->bg_work, oblock))
		return;

	/*
	 * We allocate the entry now to reserve the cblock.  If the
	 * background work is aborted we must remember to free it.
	 */
	e = alloc_entry(&mq->cache_alloc);
	BUG_ON(!e);
	e->pending_work = true;
	work.op = POLICY_PROMOTE;
	work.oblock = oblock;
	work.cblock = infer_cblock(mq, e);
	r = btracker_queue(mq->bg_work, &work, workp);
	if (r)
		free_entry(&mq->cache_alloc, e);
}
/*----------------------------------------------------------------*/

enum promote_result {
	PROMOTE_NOT,
	PROMOTE_TEMPORARY,
	PROMOTE_PERMANENT
};

/*
 * Converts a boolean into a promote result.
 */
static enum promote_result maybe_promote(bool promote)
{
	return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;
}
static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
					  int data_dir, bool fast_promote)
{
	if (data_dir == WRITE) {
		if (!allocator_empty(&mq->cache_alloc) && fast_promote)
			return PROMOTE_TEMPORARY;

		return maybe_promote(hs_e->level >= mq->write_promote_level);
	} else
		return maybe_promote(hs_e->level >= mq->read_promote_level);
}
static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
{
	sector_t r = from_oblock(b);
	(void) sector_div(r, mq->cache_blocks_per_hotspot_block);
	return to_oblock(r);
}
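/*
 * Example: with the default 16x hotspot block (see calc_hotspot_params()
 * below), cache_blocks_per_hotspot_block is 16 and origin blocks 0-15
 * all map to hotspot block 0.
 */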
static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
{
	unsigned int hi;
	dm_oblock_t hb = to_hblock(mq, b);
	struct entry *e = h_lookup(&mq->hotspot_table, hb);

	if (e) {
		stats_level_accessed(&mq->hotspot_stats, e->level);

		hi = get_index(&mq->hotspot_alloc, e);
		q_requeue(&mq->hotspot, e,
			  test_and_set_bit(hi, mq->hotspot_hit_bits) ?
			  0u : mq->hotspot_level_jump,
			  NULL, NULL);

	} else {
		stats_miss(&mq->hotspot_stats);

		e = alloc_entry(&mq->hotspot_alloc);
		if (!e) {
			e = q_pop(&mq->hotspot);
			if (e) {
				h_remove(&mq->hotspot_table, e);
				hi = get_index(&mq->hotspot_alloc, e);
				clear_bit(hi, mq->hotspot_hit_bits);
			}
		}

		if (e) {
			e->oblock = hb;
			q_push(&mq->hotspot, e);
			h_insert(&mq->hotspot_table, e);
		}
	}

	return e;
}
/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct smq_policy, policy);
}
static void smq_destroy(struct dm_cache_policy *p)
{
	struct smq_policy *mq = to_smq_policy(p);

	btracker_destroy(mq->bg_work);
	h_exit(&mq->hotspot_table);
	h_exit(&mq->table);
	free_bitset(mq->hotspot_hit_bits);
	free_bitset(mq->cache_hit_bits);
	space_exit(&mq->es);
	kfree(mq);
}
/*----------------------------------------------------------------*/

static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
		    int data_dir, bool fast_copy,
		    struct policy_work **work, bool *background_work)
{
	struct entry *e, *hs_e;
	enum promote_result pr;

	*background_work = false;

	e = h_lookup(&mq->table, oblock);
	if (e) {
		stats_level_accessed(&mq->cache_stats, e->level);

		requeue(mq, e);
		*cblock = infer_cblock(mq, e);
		return 0;

	} else {
		stats_miss(&mq->cache_stats);

		/*
		 * The hotspot queue only gets updated with misses.
		 */
		hs_e = update_hotspot_queue(mq, oblock);

		pr = should_promote(mq, hs_e, data_dir, fast_copy);
		if (pr != PROMOTE_NOT) {
			queue_promotion(mq, oblock, work);
			*background_work = true;
		}

		return -ENOENT;
	}
}
static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
		      int data_dir, bool fast_copy,
		      bool *background_work)
{
	int r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = __lookup(mq, oblock, cblock,
		     data_dir, fast_copy,
		     NULL, background_work);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
static int smq_lookup_with_work(struct dm_cache_policy *p,
				dm_oblock_t oblock, dm_cblock_t *cblock,
				int data_dir, bool fast_copy,
				struct policy_work **work)
{
	int r;
	bool background_queued;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
				   struct policy_work **result)
{
	int r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = btracker_issue(mq->bg_work, result);
	if (r == -ENODATA) {
		if (!clean_target_met(mq, idle)) {
			queue_writeback(mq, idle);
			r = btracker_issue(mq->bg_work, result);
		}
	}
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
/*
 * We need to clear any pending work flags that have been set, and in the
 * case of promotion free the entry for the destination cblock.
 */
static void __complete_background_work(struct smq_policy *mq,
				       struct policy_work *work,
				       bool success)
{
	struct entry *e = get_entry(&mq->cache_alloc,
				    from_cblock(work->cblock));

	switch (work->op) {
	case POLICY_PROMOTE:
		// !h, !q, a
		clear_pending(mq, e);
		if (success) {
			e->oblock = work->oblock;
			e->level = NR_CACHE_LEVELS - 1;
			push(mq, e);
			// h, q, a
		} else {
			free_entry(&mq->cache_alloc, e);
			// !h, !q, !a
		}
		break;

	case POLICY_DEMOTE:
		// h, !q, a
		if (success) {
			h_remove(&mq->table, e);
			free_entry(&mq->cache_alloc, e);
			// !h, !q, !a
		} else {
			clear_pending(mq, e);
			push_queue(mq, e);
			// h, q, a
		}
		break;

	case POLICY_WRITEBACK:
		// h, !q, a
		clear_pending(mq, e);
		push_queue(mq, e);
		// h, q, a
		break;
	}

	btracker_complete(mq->bg_work, work);
}
static void smq_complete_background_work(struct dm_cache_policy *p,
					 struct policy_work *work,
					 bool success)
{
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	__complete_background_work(mq, work, success);
	spin_unlock_irqrestore(&mq->lock, flags);
}
// in_hash(oblock) -> in_hash(oblock)
static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
{
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	if (e->pending_work)
		e->dirty = set;
	else {
		del_queue(mq, e);
		e->dirty = set;
		push_queue(mq, e);
	}
}

static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	__smq_set_clear_dirty(mq, cblock, true);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__smq_set_clear_dirty(mq, cblock, false);
	spin_unlock_irqrestore(&mq->lock, flags);
}
static unsigned int random_level(dm_cblock_t cblock)
{
	return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}
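/*
 * random_level() returns a pseudo-random level in [0, NR_CACHE_LEVELS),
 * so mappings loaded without a valid hint are spread across the
 * multiqueue instead of piling into a single level.
 */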
static int smq_load_mapping(struct dm_cache_policy *p,
			    dm_oblock_t oblock, dm_cblock_t cblock,
			    bool dirty, uint32_t hint, bool hint_valid)
{
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
	e->oblock = oblock;
	e->dirty = dirty;
	e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
	e->pending_work = false;

	/*
	 * When we load mappings we push ahead of both sentinels in order to
	 * allow demotions and cleaning to occur immediately.
	 */
	push_front(mq, e);

	return 0;
}
static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	if (!e->allocated)
		return -ENODATA;

	// FIXME: what if this block has pending background work?
	del_queue(mq, e);
	h_remove(&mq->table, e);
	free_entry(&mq->cache_alloc, e);

	return 0;
}
static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	if (!e->allocated)
		return 0;

	return e->level;
}
static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = to_cblock(mq->cache_alloc.nr_allocated);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
static void smq_tick(struct dm_cache_policy *p, bool can_block)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	mq->tick++;
	update_sentinels(mq);
	end_hotspot_period(mq);
	end_cache_period(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}
static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
{
	struct smq_policy *mq = to_smq_policy(p);
	mq->migrations_allowed = allow;
}
/*
 * smq has no config values, but the old mq policy did.  To avoid breaking
 * software we continue to accept these configurables for the mq policy,
 * but they have no effect.
 */
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	unsigned long tmp;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	if (!strcasecmp(key, "random_threshold") ||
	    !strcasecmp(key, "sequential_threshold") ||
	    !strcasecmp(key, "discard_promote_adjustment") ||
	    !strcasecmp(key, "read_promote_adjustment") ||
	    !strcasecmp(key, "write_promote_adjustment")) {
		DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
		return 0;
	}

	return -EINVAL;
}
static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
				 unsigned int maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;

	DMEMIT("10 random_threshold 0 "
	       "sequential_threshold 0 "
	       "discard_promote_adjustment 0 "
	       "read_promote_adjustment 0 "
	       "write_promote_adjustment 0 ");

	*sz_ptr = sz;
	return 0;
}
/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
{
	mq->policy.destroy = smq_destroy;
	mq->policy.lookup = smq_lookup;
	mq->policy.lookup_with_work = smq_lookup_with_work;
	mq->policy.get_background_work = smq_get_background_work;
	mq->policy.complete_background_work = smq_complete_background_work;
	mq->policy.set_dirty = smq_set_dirty;
	mq->policy.clear_dirty = smq_clear_dirty;
	mq->policy.load_mapping = smq_load_mapping;
	mq->policy.invalidate_mapping = smq_invalidate_mapping;
	mq->policy.get_hint = smq_get_hint;
	mq->policy.residency = smq_residency;
	mq->policy.tick = smq_tick;
	mq->policy.allow_migrations = smq_allow_migrations;

	if (mimic_mq) {
		mq->policy.set_config_value = mq_set_config_value;
		mq->policy.emit_config_values = mq_emit_config_values;
	}
}
static bool too_many_hotspot_blocks(sector_t origin_size,
				    sector_t hotspot_block_size,
				    unsigned int nr_hotspot_blocks)
{
	return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
}
static void calc_hotspot_params(sector_t origin_size,
				sector_t cache_block_size,
				unsigned int nr_cache_blocks,
				sector_t *hotspot_block_size,
				unsigned int *nr_hotspot_blocks)
{
	*hotspot_block_size = cache_block_size * 16u;
	*nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);

	while ((*hotspot_block_size > cache_block_size) &&
	       too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
		*hotspot_block_size /= 2u;
}
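/*
 * Sizing example: 64KiB (128 sector) cache blocks and 16384 cache blocks
 * give an initial guess of a 2048 sector (1MiB) hotspot block and
 * max(16384 / 4u, 1024u) = 4096 hotspot blocks, covering 4GiB.  The loop
 * above then halves the hotspot block size until that coverage fits
 * inside the origin device or the two block sizes meet.
 */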
static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
					    sector_t origin_size,
					    sector_t cache_block_size,
					    bool mimic_mq,
					    bool migrations_allowed)
{
	unsigned int i;
	unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
	unsigned int total_sentinels = 2u * nr_sentinels_per_queue;
	struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq, mimic_mq);
	mq->cache_size = cache_size;
	mq->cache_block_size = cache_block_size;

	calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
			    &mq->hotspot_block_size, &mq->nr_hotspot_blocks);

	mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
	mq->hotspot_level_jump = 1u;
	if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
		DMERR("couldn't initialize entry space");
		goto bad_pool_init;
	}

	init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
	for (i = 0; i < nr_sentinels_per_queue; i++)
		get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;

	init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
	for (i = 0; i < nr_sentinels_per_queue; i++)
		get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;

	init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
		       total_sentinels + mq->nr_hotspot_blocks);

	init_allocator(&mq->cache_alloc, &mq->es,
		       total_sentinels + mq->nr_hotspot_blocks,
		       total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));

	mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
	if (!mq->hotspot_hit_bits) {
		DMERR("couldn't allocate hotspot hit bitset");
		goto bad_hotspot_hit_bits;
	}
	clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);

	if (from_cblock(cache_size)) {
		mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
		if (!mq->cache_hit_bits) {
			DMERR("couldn't allocate cache hit bitset");
			goto bad_cache_hit_bits;
		}
		clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
	} else
		mq->cache_hit_bits = NULL;

	mq->tick = 0;
	spin_lock_init(&mq->lock);

	q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
	mq->hotspot.nr_top_levels = 8;
	mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
					   from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);

	q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
	q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);

	stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
	stats_init(&mq->cache_stats, NR_CACHE_LEVELS);

	if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
		goto bad_alloc_table;

	if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
		goto bad_alloc_hotspot_table;

	sentinels_init(mq);
	mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;

	mq->next_hotspot_period = jiffies;
	mq->next_cache_period = jiffies;

	mq->bg_work = btracker_create(4096);	/* FIXME: hard coded value */
	if (!mq->bg_work)
		goto bad_btracker;

	mq->migrations_allowed = migrations_allowed;

	return &mq->policy;

bad_btracker:
	h_exit(&mq->hotspot_table);
bad_alloc_hotspot_table:
	h_exit(&mq->table);
bad_alloc_table:
	free_bitset(mq->cache_hit_bits);
bad_cache_hit_bits:
	free_bitset(mq->hotspot_hit_bits);
bad_hotspot_hit_bits:
	space_exit(&mq->es);
bad_pool_init:
	kfree(mq);

	return NULL;
}
static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, false, true);
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, true, true);
}

static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
					      sector_t origin_size,
					      sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, false, false);
}
/*----------------------------------------------------------------*/

static struct dm_cache_policy_type smq_policy_type = {
	.name = "smq",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = smq_create
};

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create,
};

static struct dm_cache_policy_type cleaner_policy_type = {
	.name = "cleaner",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = cleaner_create,
};

static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = smq_create,
	.real = &smq_policy_type
};
static int __init smq_init(void)
{
	int r;

	r = dm_cache_policy_register(&smq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		return -ENOMEM;
	}

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed (as mq) %d", r);
		goto out_mq;
	}

	r = dm_cache_policy_register(&cleaner_policy_type);
	if (r) {
		DMERR("register failed (as cleaner) %d", r);
		goto out_cleaner;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (r) {
		DMERR("register failed (as default) %d", r);
		goto out_default;
	}

	return 0;

out_default:
	dm_cache_policy_unregister(&cleaner_policy_type);
out_cleaner:
	dm_cache_policy_unregister(&mq_policy_type);
out_mq:
	dm_cache_policy_unregister(&smq_policy_type);

	return -ENOMEM;
}
static void __exit smq_exit(void)
{
	dm_cache_policy_unregister(&cleaner_policy_type);
	dm_cache_policy_unregister(&smq_policy_type);
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);
}
module_init(smq_init);
module_exit(smq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");

MODULE_ALIAS("dm-cache-default");
MODULE_ALIAS("dm-cache-mq");
MODULE_ALIAS("dm-cache-cleaner");