 * Copyright (C) 2015 Red Hat. All rights reserved.
 * This file is released under the GPL.

#include "dm-cache-background-tracker.h"
#include "dm-cache-policy-internal.h"
#include "dm-cache-policy.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#define DM_MSG_PREFIX "cache-policy-smq"

/*----------------------------------------------------------------*/

 * Safe division functions that return zero on divide by zero.

static unsigned safe_div(unsigned n, unsigned d)
	return d ? n / d : 0u;

static unsigned safe_mod(unsigned n, unsigned d)
	return d ? n % d : 0u;

/*----------------------------------------------------------------*/

	unsigned hash_next:28;

/*----------------------------------------------------------------*/

#define INDEXER_NULL ((1u << 28u) - 1u)

 * An entry_space manages a set of entries that we use for the queues.
 * The clean and dirty queues share entries, so this object is separate
 * from the queue itself.

static int space_init(struct entry_space *es, unsigned nr_entries)
	if (!nr_entries) {
		es->begin = es->end = NULL;
		return 0;
	}

	es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry)));

	es->end = es->begin + nr_entries;

static void space_exit(struct entry_space *es)

static struct entry *__get_entry(struct entry_space *es, unsigned block)
	e = es->begin + block;

static unsigned to_index(struct entry_space *es, struct entry *e)
	BUG_ON(e < es->begin || e >= es->end);

static struct entry *to_entry(struct entry_space *es, unsigned block)
	if (block == INDEXER_NULL)
		return NULL;

	return __get_entry(es, block);
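/*
 * Entries are referenced by their 28-bit index into the entry_space
 * array rather than by pointer; INDEXER_NULL plays the role of NULL,
 * and to_index()/to_entry() convert between the two representations.
 */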
/*----------------------------------------------------------------*/

	unsigned nr_elts; /* excluding sentinel entries */

static void l_init(struct ilist *l)
	l->head = l->tail = INDEXER_NULL;

static struct entry *l_head(struct entry_space *es, struct ilist *l)
	return to_entry(es, l->head);

static struct entry *l_tail(struct entry_space *es, struct ilist *l)
	return to_entry(es, l->tail);

static struct entry *l_next(struct entry_space *es, struct entry *e)
	return to_entry(es, e->next);

static struct entry *l_prev(struct entry_space *es, struct entry *e)
	return to_entry(es, e->prev);

static bool l_empty(struct ilist *l)
	return l->head == INDEXER_NULL;

static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
	struct entry *head = l_head(es, l);

	e->prev = INDEXER_NULL;

	if (head)
		head->prev = l->head = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
	struct entry *tail = l_tail(es, l);

	e->next = INDEXER_NULL;

	if (tail)
		tail->next = l->tail = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

static void l_add_before(struct entry_space *es, struct ilist *l,
			 struct entry *old, struct entry *e)
	struct entry *prev = l_prev(es, old);

	l_add_head(es, l, e);

	e->next = to_index(es, old);
	prev->next = old->prev = to_index(es, e);

static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
	struct entry *prev = l_prev(es, e);
	struct entry *next = l_next(es, e);

	prev->next = e->next;
	next->prev = e->prev;

static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
	for (e = l_head(es, l); e; e = l_next(es, e))

static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
	for (e = l_tail(es, l); e; e = l_prev(es, e))

/*----------------------------------------------------------------*/

 * The stochastic-multi-queue is a set of lru lists stacked into levels.
 * Entries are moved up levels when they are used, which loosely orders the
 * most accessed entries in the top levels and least in the bottom. This
 * structure is *much* better than a single lru list.
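 *
 * For example, with MAX_LEVELS = 64 an entry that keeps being hit is
 * nudged towards level 63 by q_requeue(), while q_peek()/q_pop() scan
 * from level 0 upwards, so eviction candidates are drawn from the
 * least used levels first.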
#define MAX_LEVELS 64u

	struct entry_space *es;

	struct ilist qs[MAX_LEVELS];

	 * We maintain a count of the number of entries we would like in each
	unsigned last_target_nr_elts;
	unsigned nr_top_levels;
	unsigned nr_in_top_levels;
	unsigned target_count[MAX_LEVELS];

static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
	q->nr_levels = nr_levels;

	for (i = 0; i < q->nr_levels; i++) {
		q->target_count[i] = 0u;

	q->last_target_nr_elts = 0u;
	q->nr_top_levels = 0u;
	q->nr_in_top_levels = 0u;

static unsigned q_size(struct queue *q)

 * Insert an entry to the back of the given level.

static void q_push(struct queue *q, struct entry *e)
	BUG_ON(e->pending_work);

	l_add_tail(q->es, q->qs + e->level, e);

static void q_push_front(struct queue *q, struct entry *e)
	BUG_ON(e->pending_work);

	l_add_head(q->es, q->qs + e->level, e);

static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
	BUG_ON(e->pending_work);

	l_add_before(q->es, q->qs + e->level, old, e);

static void q_del(struct queue *q, struct entry *e)
	l_del(q->es, q->qs + e->level, e);

 * Return the oldest entry of the lowest populated level.

static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
	max_level = min(max_level, q->nr_levels);

	for (level = 0; level < max_level; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
			if (can_cross_sentinel)

static struct entry *q_pop(struct queue *q)
	struct entry *e = q_peek(q, q->nr_levels, true);

 * This function assumes there is a non-sentinel entry to pop. It's only
 * used by redistribute, so we know this is true. It also doesn't adjust
 * the q->nr_elts count.

static struct entry *__redist_pop_from(struct queue *q, unsigned level)
	for (; level < q->nr_levels; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
			l_del(q->es, q->qs + e->level, e);

static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
	unsigned level, nr_levels, entries_per_level, remainder;

	BUG_ON(lbegin > lend);
	BUG_ON(lend > q->nr_levels);
	nr_levels = lend - lbegin;
	entries_per_level = safe_div(nr_elts, nr_levels);
	remainder = safe_mod(nr_elts, nr_levels);

	for (level = lbegin; level < lend; level++)
		q->target_count[level] =
			(level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;

 * Typically we have fewer elements in the top few levels which allows us
 * to adjust the promote threshold nicely.

static void q_set_targets(struct queue *q)
	if (q->last_target_nr_elts == q->nr_elts)
		return;

	q->last_target_nr_elts = q->nr_elts;

	if (q->nr_top_levels > q->nr_levels)
		q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);

		q_set_targets_subrange_(q, q->nr_in_top_levels,
					q->nr_levels - q->nr_top_levels, q->nr_levels);

		if (q->nr_in_top_levels < q->nr_elts)
			q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
						0, q->nr_levels - q->nr_top_levels);
			q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);

static void q_redistribute(struct queue *q)
	unsigned target, level;
	struct ilist *l, *l_above;

	for (level = 0u; level < q->nr_levels - 1u; level++) {
		target = q->target_count[level];

		 * Pull down some entries from the level above.
		while (l->nr_elts < target) {
			e = __redist_pop_from(q, level + 1u);
			l_add_tail(q->es, l, e);

		 * Push some entries up.
		l_above = q->qs + level + 1u;
		while (l->nr_elts > target) {
			e = l_pop_tail(q->es, l);
			e->level = level + 1u;
			l_add_tail(q->es, l_above, e);
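/*
 * Redistribution keeps each level close to its target_count: a level
 * that is short pulls entries down from the level above, and a level
 * that is over target pushes its oldest entries up one level.
 */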
static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
		      struct entry *s1, struct entry *s2)
	unsigned sentinels_passed = 0;
	unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);

	/* try and find an entry to swap with */
	if (extra_levels && (e->level < q->nr_levels - 1u)) {
		for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))

		de->level = e->level;

		switch (sentinels_passed) {
			q_push_before(q, s1, de);

			q_push_before(q, s2, de);

	e->level = new_level;
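/*
 * Rather than shuffling every entry, q_requeue() swaps the promoted
 * entry with a non-sentinel entry already sitting at the destination
 * level; the number of sentinels passed while searching decides
 * whether the displaced entry is reinserted before s1, before s2, or
 * pushed normally onto its new level.
 */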
/*----------------------------------------------------------------*/

#define SIXTEENTH (1u << (FP_SHIFT - 4u))
#define EIGHTH (1u << (FP_SHIFT - 3u))

	unsigned hit_threshold;

static void stats_init(struct stats *s, unsigned nr_levels)
	s->hit_threshold = (nr_levels * 3u) / 4u;

static void stats_reset(struct stats *s)
	s->hits = s->misses = 0u;

static void stats_level_accessed(struct stats *s, unsigned level)
	if (level >= s->hit_threshold)

static void stats_miss(struct stats *s)

 * There are times when we don't have any confidence in the hotspot queue.
 * Such as when a fresh cache is created and the blocks have been spread
 * out across the levels, or if an io load changes. We detect this by
 * seeing how often a lookup is in the top levels of the hotspot queue.

static enum performance stats_assess(struct stats *s)
	unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);

	if (confidence < SIXTEENTH)
	else if (confidence < EIGHTH)
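/*
 * Confidence is the hit fraction in fixed point.  As a rough worked
 * example (assuming FP_SHIFT is 8): 10 hits and 300 misses give
 * (10 << 8) / 310 ~= 8, which is below SIXTEENTH (16), so the queue
 * would be judged to be performing poorly.
 */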
/*----------------------------------------------------------------*/

struct smq_hash_table {
	struct entry_space *es;
	unsigned long long hash_bits;

 * All cache entries are stored in a chained hash table. To save space we
 * use indexing again, and only store indexes to the next entry.

static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries)
	unsigned i, nr_buckets;

	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
	ht->hash_bits = __ffs(nr_buckets);

	ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));

	for (i = 0; i < nr_buckets; i++)
		ht->buckets[i] = INDEXER_NULL;

static void h_exit(struct smq_hash_table *ht)

static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
	return to_entry(ht->es, ht->buckets[bucket]);

static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
	return to_entry(ht->es, e->hash_next);

static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
	e->hash_next = ht->buckets[bucket];
	ht->buckets[bucket] = to_index(ht->es, e);

static void h_insert(struct smq_hash_table *ht, struct entry *e)
	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);

	__h_insert(ht, h, e);

static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock,
	for (e = h_head(ht, h); e; e = h_next(ht, e)) {
		if (e->oblock == oblock)

static void __h_unlink(struct smq_hash_table *ht, unsigned h,
		       struct entry *e, struct entry *prev)
	prev->hash_next = e->hash_next;
	ht->buckets[h] = e->hash_next;

 * Also moves each entry to the front of the bucket.

static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
	struct entry *e, *prev;
	unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);

	e = __h_lookup(ht, h, oblock, &prev);
		 * Move to the front because this entry is likely
		__h_unlink(ht, h, e, prev);
		__h_insert(ht, h, e);

static void h_remove(struct smq_hash_table *ht, struct entry *e)
	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);

	 * The down side of using a singly linked list is we have to
	 * iterate the bucket to remove an item.
	e = __h_lookup(ht, h, e->oblock, &prev);
		__h_unlink(ht, h, e, prev);
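/*
 * In short: buckets hold entry indexes, chains are singly linked via
 * hash_next, and h_lookup() moves a found entry to the front of its
 * bucket so frequently looked-up entries are found quickly.
 */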
/*----------------------------------------------------------------*/

	struct entry_space *es;

	unsigned nr_allocated;

static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
			   unsigned begin, unsigned end)
	ea->nr_allocated = 0u;

	for (i = begin; i != end; i++)
		l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));

static void init_entry(struct entry *e)
	 * We can't memset because that would clear the hotspot and
	 * sentinel bits which remain constant.
	e->hash_next = INDEXER_NULL;
	e->next = INDEXER_NULL;
	e->prev = INDEXER_NULL;
	e->dirty = true; /* FIXME: audit */
	e->pending_work = false;

static struct entry *alloc_entry(struct entry_alloc *ea)
	if (l_empty(&ea->free))
		return NULL;

	e = l_pop_head(ea->es, &ea->free);

 * This assumes the cblock hasn't already been allocated.

static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
	struct entry *e = __get_entry(ea->es, ea->begin + i);

	BUG_ON(e->allocated);

	l_del(ea->es, &ea->free, e);

static void free_entry(struct entry_alloc *ea, struct entry *e)
	BUG_ON(!ea->nr_allocated);
	BUG_ON(!e->allocated);

	e->allocated = false;
	l_add_tail(ea->es, &ea->free, e);

static bool allocator_empty(struct entry_alloc *ea)
	return l_empty(&ea->free);

static unsigned get_index(struct entry_alloc *ea, struct entry *e)
	return to_index(ea->es, e) - ea->begin;

static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
	return __get_entry(ea->es, ea->begin + index);
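/*
 * Each allocator owns a contiguous [begin, end) slice of the entry
 * space; get_index()/get_entry() translate between an entry and its
 * zero-based index within that slice (for cache entries this index is
 * the cblock, see infer_cblock() below).
 */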
/*----------------------------------------------------------------*/

#define NR_HOTSPOT_LEVELS 64u
#define NR_CACHE_LEVELS 64u

#define WRITEBACK_PERIOD (10ul * HZ)
#define DEMOTE_PERIOD (60ul * HZ)

#define HOTSPOT_UPDATE_PERIOD (HZ)
#define CACHE_UPDATE_PERIOD (60ul * HZ)

	struct dm_cache_policy policy;

	/* protects everything */
	dm_cblock_t cache_size;
	sector_t cache_block_size;

	sector_t hotspot_block_size;
	unsigned nr_hotspot_blocks;
	unsigned cache_blocks_per_hotspot_block;
	unsigned hotspot_level_jump;

	struct entry_space es;
	struct entry_alloc writeback_sentinel_alloc;
	struct entry_alloc demote_sentinel_alloc;
	struct entry_alloc hotspot_alloc;
	struct entry_alloc cache_alloc;

	unsigned long *hotspot_hit_bits;
	unsigned long *cache_hit_bits;

	 * We maintain three queues of entries. The cache proper,
	 * consisting of a clean and dirty queue, containing the currently
	 * active mappings. The hotspot queue uses a larger block size to
	 * track blocks that are being hit frequently and potential
	 * candidates for promotion to the cache.
	struct queue hotspot;

	struct stats hotspot_stats;
	struct stats cache_stats;

	 * Keeps track of time, incremented by the core. We use this to
	 * avoid attributing multiple hits within the same tick.

	 * The hash tables allow us to quickly find an entry by origin
	struct smq_hash_table table;
	struct smq_hash_table hotspot_table;

	bool current_writeback_sentinels;
	unsigned long next_writeback_period;

	bool current_demote_sentinels;
	unsigned long next_demote_period;

	unsigned write_promote_level;
	unsigned read_promote_level;

	unsigned long next_hotspot_period;
	unsigned long next_cache_period;

	struct background_tracker *bg_work;

	bool migrations_allowed:1;

	 * If this is set the policy will try and clean the whole cache
	 * even if the device is not idle.

/*----------------------------------------------------------------*/

static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
	return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);

static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
	return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);

static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
	return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
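/*
 * Each sentinel allocator holds two complete sets of NR_CACHE_LEVELS
 * sentinels; 'which' selects between the first set (indexes
 * 0..NR_CACHE_LEVELS - 1) and the second (NR_CACHE_LEVELS onwards),
 * tracked by the current_*_sentinels flags.
 */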
static void __update_writeback_sentinels(struct smq_policy *mq)
	struct queue *q = &mq->dirty;
	struct entry *sentinel;

	for (level = 0; level < q->nr_levels; level++) {
		sentinel = writeback_sentinel(mq, level);

static void __update_demote_sentinels(struct smq_policy *mq)
	struct queue *q = &mq->clean;
	struct entry *sentinel;

	for (level = 0; level < q->nr_levels; level++) {
		sentinel = demote_sentinel(mq, level);

static void update_sentinels(struct smq_policy *mq)
	if (time_after(jiffies, mq->next_writeback_period)) {
		mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
		mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
		__update_writeback_sentinels(mq);

	if (time_after(jiffies, mq->next_demote_period)) {
		mq->next_demote_period = jiffies + DEMOTE_PERIOD;
		mq->current_demote_sentinels = !mq->current_demote_sentinels;
		__update_demote_sentinels(mq);

static void __sentinels_init(struct smq_policy *mq)
	struct entry *sentinel;

	for (level = 0; level < NR_CACHE_LEVELS; level++) {
		sentinel = writeback_sentinel(mq, level);
		sentinel->level = level;
		q_push(&mq->dirty, sentinel);

		sentinel = demote_sentinel(mq, level);
		sentinel->level = level;
		q_push(&mq->clean, sentinel);

static void sentinels_init(struct smq_policy *mq)
	mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
	mq->next_demote_period = jiffies + DEMOTE_PERIOD;

	mq->current_writeback_sentinels = false;
	mq->current_demote_sentinels = false;
	__sentinels_init(mq);

	mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
	mq->current_demote_sentinels = !mq->current_demote_sentinels;
	__sentinels_init(mq);
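/*
 * Both sentinel sets are pushed onto the clean and dirty queues up
 * front; update_sentinels() then flips the active set once per
 * WRITEBACK_PERIOD/DEMOTE_PERIOD and walks every level to refresh it,
 * giving writeback and demotion a marker for "older than one period".
 */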
/*----------------------------------------------------------------*/

static void del_queue(struct smq_policy *mq, struct entry *e)
	q_del(e->dirty ? &mq->dirty : &mq->clean, e);

static void push_queue(struct smq_policy *mq, struct entry *e)
	if (e->dirty)
		q_push(&mq->dirty, e);
	else
		q_push(&mq->clean, e);

// !h, !q, a -> h, q, a
static void push(struct smq_policy *mq, struct entry *e)
	h_insert(&mq->table, e);
	if (!e->pending_work)
		push_queue(mq, e);

static void push_queue_front(struct smq_policy *mq, struct entry *e)
	if (e->dirty)
		q_push_front(&mq->dirty, e);
	else
		q_push_front(&mq->clean, e);

static void push_front(struct smq_policy *mq, struct entry *e)
	h_insert(&mq->table, e);
	if (!e->pending_work)
		push_queue_front(mq, e);

static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
	return to_cblock(get_index(&mq->cache_alloc, e));

static void requeue(struct smq_policy *mq, struct entry *e)
	 * Pending work has temporarily been taken out of the queues.
	if (e->pending_work)

	if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
		q_requeue(&mq->clean, e, 1u, NULL, NULL);

		q_requeue(&mq->dirty, e, 1u,
			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));

static unsigned default_promote_level(struct smq_policy *mq)
	 * The promote level depends on the current performance of the
	 * If the cache is performing badly, then we can't afford
	 * to promote much without causing performance to drop below that
	 * of the origin device.

	 * If the cache is performing well, then we don't need to promote
	 * much. If it isn't broken, don't fix it.

	 * If the cache is middling then we promote more.

	 * This scheme reminds me of a graph of entropy vs probability of a
	static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};

	unsigned hits = mq->cache_stats.hits;
	unsigned misses = mq->cache_stats.misses;
	unsigned index = safe_div(hits << 4u, hits + misses);

	return table[index];
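/*
 * Worked example: with 300 hits and 100 misses the index is
 * (300 << 4) / 400 = 12, so the function returns table[12] = 3.
 */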
static void update_promote_levels(struct smq_policy *mq)
	 * If there are unused cache entries then we want to be really
	unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
		default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);

	threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);

	 * If the hotspot queue is performing badly then we have little
	 * confidence that we know which blocks to promote. So we cut down
	 * the amount of promotions.
	switch (stats_assess(&mq->hotspot_stats)) {
		threshold_level /= 4u;

		threshold_level /= 2u;

	mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
	mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level);
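/*
 * read_promote_level/write_promote_level are compared against a
 * hotspot entry's level in should_promote(); the lower the promote
 * level, the larger the slice of the hotspot queue that qualifies for
 * promotion.
 */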
 * If the hotspot queue is performing badly, then we try and move entries
 * around more quickly.

static void update_level_jump(struct smq_policy *mq)
	switch (stats_assess(&mq->hotspot_stats)) {
		mq->hotspot_level_jump = 4u;

		mq->hotspot_level_jump = 2u;

		mq->hotspot_level_jump = 1u;

static void end_hotspot_period(struct smq_policy *mq)
	clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
	update_promote_levels(mq);

	if (time_after(jiffies, mq->next_hotspot_period)) {
		update_level_jump(mq);
		q_redistribute(&mq->hotspot);
		stats_reset(&mq->hotspot_stats);
		mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;

static void end_cache_period(struct smq_policy *mq)
	if (time_after(jiffies, mq->next_cache_period)) {
		clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));

		q_redistribute(&mq->dirty);
		q_redistribute(&mq->clean);
		stats_reset(&mq->cache_stats);

		mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;

/*----------------------------------------------------------------*/

 * Targets are given as a percentage.

#define CLEAN_TARGET 25u
#define FREE_TARGET 25u

static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
	return from_cblock(mq->cache_size) * p / 100u;

static bool clean_target_met(struct smq_policy *mq, bool idle)
	 * Cache entries may not be populated. So we cannot rely on the
	 * size of the clean queue.
	if (idle || mq->cleaner) {
		 * We'd like to clean everything.
		return q_size(&mq->dirty) == 0u;

	 * If we're busy we don't worry about cleaning at all.

static bool free_target_met(struct smq_policy *mq)
	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
		percent_to_target(mq, FREE_TARGET);
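/*
 * E.g. a cache of 1000 blocks with FREE_TARGET of 25% wants at least
 * 250 blocks either unallocated or already queued for demotion before
 * the free target counts as met.
 */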
/*----------------------------------------------------------------*/

static void mark_pending(struct smq_policy *mq, struct entry *e)
	BUG_ON(e->sentinel);
	BUG_ON(!e->allocated);
	BUG_ON(e->pending_work);
	e->pending_work = true;

static void clear_pending(struct smq_policy *mq, struct entry *e)
	BUG_ON(!e->pending_work);
	e->pending_work = false;

static void queue_writeback(struct smq_policy *mq, bool idle)
	struct policy_work work;

	e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
		mark_pending(mq, e);
		q_del(&mq->dirty, e);

		work.op = POLICY_WRITEBACK;
		work.oblock = e->oblock;
		work.cblock = infer_cblock(mq, e);

		r = btracker_queue(mq->bg_work, &work, NULL);
			clear_pending(mq, e);
			q_push_front(&mq->dirty, e);

static void queue_demotion(struct smq_policy *mq)
	struct policy_work work;

	if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
		return;

	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
		if (!clean_target_met(mq, true))
			queue_writeback(mq, false);

	mark_pending(mq, e);
	q_del(&mq->clean, e);

	work.op = POLICY_DEMOTE;
	work.oblock = e->oblock;
	work.cblock = infer_cblock(mq, e);
	r = btracker_queue(mq->bg_work, &work, NULL);
		clear_pending(mq, e);
		q_push_front(&mq->clean, e);

static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
			    struct policy_work **workp)
	struct policy_work work;

	if (!mq->migrations_allowed)
		return;

	if (allocator_empty(&mq->cache_alloc)) {
		 * We always claim to be 'idle' to ensure some demotions happen
		 * with continuous loads.
		if (!free_target_met(mq))

	if (btracker_promotion_already_present(mq->bg_work, oblock))
		return;

	 * We allocate the entry now to reserve the cblock. If the
	 * background work is aborted we must remember to free it.
	e = alloc_entry(&mq->cache_alloc);
	e->pending_work = true;
	work.op = POLICY_PROMOTE;
	work.oblock = oblock;
	work.cblock = infer_cblock(mq, e);
	r = btracker_queue(mq->bg_work, &work, workp);
		free_entry(&mq->cache_alloc, e);
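/*
 * If queueing the background work fails, the cblock reserved above is
 * handed straight back to the allocator so a later promotion can
 * claim it.
 */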
/*----------------------------------------------------------------*/

enum promote_result {

 * Converts a boolean into a promote result.

static enum promote_result maybe_promote(bool promote)
	return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;

static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
					  int data_dir, bool fast_promote)
	if (data_dir == WRITE) {
		if (!allocator_empty(&mq->cache_alloc) && fast_promote)
			return PROMOTE_TEMPORARY;

		return maybe_promote(hs_e->level >= mq->write_promote_level);

	return maybe_promote(hs_e->level >= mq->read_promote_level);

static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
	sector_t r = from_oblock(b);

	(void) sector_div(r, mq->cache_blocks_per_hotspot_block);
	return to_oblock(r);
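/*
 * Origin blocks are grouped into larger hotspot blocks, e.g. with a
 * cache_blocks_per_hotspot_block of 16, origin blocks 0-15 all map to
 * hotspot block 0 and share a single entry in the hotspot queue.
 */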
static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
	dm_oblock_t hb = to_hblock(mq, b);
	struct entry *e = h_lookup(&mq->hotspot_table, hb);

		stats_level_accessed(&mq->hotspot_stats, e->level);

		hi = get_index(&mq->hotspot_alloc, e);
		q_requeue(&mq->hotspot, e,
			  test_and_set_bit(hi, mq->hotspot_hit_bits) ?
			  0u : mq->hotspot_level_jump,

		stats_miss(&mq->hotspot_stats);

		e = alloc_entry(&mq->hotspot_alloc);
			e = q_pop(&mq->hotspot);
				h_remove(&mq->hotspot_table, e);
				hi = get_index(&mq->hotspot_alloc, e);
				clear_bit(hi, mq->hotspot_hit_bits);

			q_push(&mq->hotspot, e);
			h_insert(&mq->hotspot_table, e);
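/*
 * On a hotspot hit the entry is requeued; the hotspot_hit_bits bitmap
 * means only the first hit in a period jumps hotspot_level_jump
 * levels, later hits requeue with a jump of 0.  On a miss a new
 * hotspot entry is allocated, recycling the coldest existing entry
 * via q_pop() when the allocator is empty, and inserted into the
 * hotspot queue and hash table.
 */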
/*----------------------------------------------------------------*/

 * Public interface, via the policy struct. See dm-cache-policy.h for a
 * description of these.

static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
	return container_of(p, struct smq_policy, policy);

static void smq_destroy(struct dm_cache_policy *p)
	struct smq_policy *mq = to_smq_policy(p);

	btracker_destroy(mq->bg_work);
	h_exit(&mq->hotspot_table);
	free_bitset(mq->hotspot_hit_bits);
	free_bitset(mq->cache_hit_bits);
	space_exit(&mq->es);

/*----------------------------------------------------------------*/

static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
		    int data_dir, bool fast_copy,
		    struct policy_work **work, bool *background_work)
	struct entry *e, *hs_e;
	enum promote_result pr;

	*background_work = false;

	e = h_lookup(&mq->table, oblock);
		stats_level_accessed(&mq->cache_stats, e->level);

		*cblock = infer_cblock(mq, e);

	stats_miss(&mq->cache_stats);

	 * The hotspot queue only gets updated with misses.
	hs_e = update_hotspot_queue(mq, oblock);

	pr = should_promote(mq, hs_e, data_dir, fast_copy);
	if (pr != PROMOTE_NOT) {
		queue_promotion(mq, oblock, work);
		*background_work = true;

static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
		      int data_dir, bool fast_copy,
		      bool *background_work)
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = __lookup(mq, oblock, cblock,
		     data_dir, fast_copy,
		     NULL, background_work);
	spin_unlock_irqrestore(&mq->lock, flags);

static int smq_lookup_with_work(struct dm_cache_policy *p,
				dm_oblock_t oblock, dm_cblock_t *cblock,
				int data_dir, bool fast_copy,
				struct policy_work **work)
	bool background_queued;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
	spin_unlock_irqrestore(&mq->lock, flags);

static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
				   struct policy_work **result)
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = btracker_issue(mq->bg_work, result);
	if (r == -ENODATA) {
		if (!clean_target_met(mq, idle)) {
			queue_writeback(mq, idle);
			r = btracker_issue(mq->bg_work, result);
	spin_unlock_irqrestore(&mq->lock, flags);

 * We need to clear any pending work flags that have been set, and in the
 * case of promotion free the entry for the destination cblock.

static void __complete_background_work(struct smq_policy *mq,
				       struct policy_work *work,
	struct entry *e = get_entry(&mq->cache_alloc,
				    from_cblock(work->cblock));

	case POLICY_PROMOTE:
		clear_pending(mq, e);
			e->oblock = work->oblock;
			e->level = NR_CACHE_LEVELS - 1;

			free_entry(&mq->cache_alloc, e);

			h_remove(&mq->table, e);
			free_entry(&mq->cache_alloc, e);

			clear_pending(mq, e);

	case POLICY_WRITEBACK:
		clear_pending(mq, e);

	btracker_complete(mq->bg_work, work);

static void smq_complete_background_work(struct dm_cache_policy *p,
					 struct policy_work *work,
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	__complete_background_work(mq, work, success);
	spin_unlock_irqrestore(&mq->lock, flags);

// in_hash(oblock) -> in_hash(oblock)
static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	if (e->pending_work)

static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	__smq_set_clear_dirty(mq, cblock, true);
	spin_unlock_irqrestore(&mq->lock, flags);

static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__smq_set_clear_dirty(mq, cblock, false);
	spin_unlock_irqrestore(&mq->lock, flags);

static unsigned random_level(dm_cblock_t cblock)
	return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
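/*
 * hash_32(..., 9) yields a 9 bit value; masking with
 * NR_CACHE_LEVELS - 1 (63) spreads mappings loaded without a valid
 * hint pseudo-randomly across the 64 cache levels.
 */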
static int smq_load_mapping(struct dm_cache_policy *p,
			    dm_oblock_t oblock, dm_cblock_t cblock,
			    bool dirty, uint32_t hint, bool hint_valid)
	struct smq_policy *mq = to_smq_policy(p);

	e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
	e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
	e->pending_work = false;

	 * When we load mappings we push ahead of both sentinels in order to
	 * allow demotions and cleaning to occur immediately.

static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	// FIXME: what if this block has pending background work?
	h_remove(&mq->table, e);
	free_entry(&mq->cache_alloc, e);

static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

static dm_cblock_t smq_residency(struct dm_cache_policy *p)
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = to_cblock(mq->cache_alloc.nr_allocated);
	spin_unlock_irqrestore(&mq->lock, flags);

static void smq_tick(struct dm_cache_policy *p, bool can_block)
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	update_sentinels(mq);
	end_hotspot_period(mq);
	end_cache_period(mq);
	spin_unlock_irqrestore(&mq->lock, flags);

static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
	struct smq_policy *mq = to_smq_policy(p);

	mq->migrations_allowed = allow;

 * smq has no config values, but the old mq policy did. To avoid breaking
 * software we continue to accept these configurables for the mq policy,
 * but they have no effect.

static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	if (!strcasecmp(key, "random_threshold") ||
	    !strcasecmp(key, "sequential_threshold") ||
	    !strcasecmp(key, "discard_promote_adjustment") ||
	    !strcasecmp(key, "read_promote_adjustment") ||
	    !strcasecmp(key, "write_promote_adjustment")) {
		DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);

static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
				 unsigned maxlen, ssize_t *sz_ptr)
	ssize_t sz = *sz_ptr;

	DMEMIT("10 random_threshold 0 "
	       "sequential_threshold 0 "
	       "discard_promote_adjustment 0 "
	       "read_promote_adjustment 0 "
	       "write_promote_adjustment 0 ");

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
	mq->policy.destroy = smq_destroy;
	mq->policy.lookup = smq_lookup;
	mq->policy.lookup_with_work = smq_lookup_with_work;
	mq->policy.get_background_work = smq_get_background_work;
	mq->policy.complete_background_work = smq_complete_background_work;
	mq->policy.set_dirty = smq_set_dirty;
	mq->policy.clear_dirty = smq_clear_dirty;
	mq->policy.load_mapping = smq_load_mapping;
	mq->policy.invalidate_mapping = smq_invalidate_mapping;
	mq->policy.get_hint = smq_get_hint;
	mq->policy.residency = smq_residency;
	mq->policy.tick = smq_tick;
	mq->policy.allow_migrations = smq_allow_migrations;

		mq->policy.set_config_value = mq_set_config_value;
		mq->policy.emit_config_values = mq_emit_config_values;

static bool too_many_hotspot_blocks(sector_t origin_size,
				    sector_t hotspot_block_size,
				    unsigned nr_hotspot_blocks)
	return (hotspot_block_size * nr_hotspot_blocks) > origin_size;

static void calc_hotspot_params(sector_t origin_size,
				sector_t cache_block_size,
				unsigned nr_cache_blocks,
				sector_t *hotspot_block_size,
				unsigned *nr_hotspot_blocks)
	*hotspot_block_size = cache_block_size * 16u;
	*nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);

	while ((*hotspot_block_size > cache_block_size) &&
	       too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
		*hotspot_block_size /= 2u;
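/*
 * E.g. with 64 sector cache blocks the hotspot block size starts at
 * 1024 sectors and the block count at max(nr_cache_blocks / 4, 1024);
 * the hotspot block size is then halved until the hotspot area no
 * longer exceeds the origin device (but never below one cache block).
 */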
static struct dm_cache_policy *
__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
	     bool mimic_mq, bool migrations_allowed, bool cleaner)
	unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
	unsigned total_sentinels = 2u * nr_sentinels_per_queue;
	struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	init_policy_functions(mq, mimic_mq);
	mq->cache_size = cache_size;
	mq->cache_block_size = cache_block_size;

	calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
			    &mq->hotspot_block_size, &mq->nr_hotspot_blocks);

	mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
	mq->hotspot_level_jump = 1u;
	if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
		DMERR("couldn't initialize entry space");

	init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
	for (i = 0; i < nr_sentinels_per_queue; i++)
		get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;

	init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
	for (i = 0; i < nr_sentinels_per_queue; i++)
		get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;

	init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
		       total_sentinels + mq->nr_hotspot_blocks);

	init_allocator(&mq->cache_alloc, &mq->es,
		       total_sentinels + mq->nr_hotspot_blocks,
		       total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));
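/*
 * The entry space is laid out as:
 *   [0, 2 * NR_CACHE_LEVELS)                 writeback sentinels (two sets)
 *   [2 * NR_CACHE_LEVELS, total_sentinels)   demote sentinels (two sets)
 *   [total_sentinels, + nr_hotspot_blocks)   hotspot entries
 *   [..., + from_cblock(cache_size))         cache entries (index == cblock)
 */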
	mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
	if (!mq->hotspot_hit_bits) {
		DMERR("couldn't allocate hotspot hit bitset");
		goto bad_hotspot_hit_bits;

	clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);

	if (from_cblock(cache_size)) {
		mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
		if (!mq->cache_hit_bits) {
			DMERR("couldn't allocate cache hit bitset");
			goto bad_cache_hit_bits;

		clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));

		mq->cache_hit_bits = NULL;

	spin_lock_init(&mq->lock);

	q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
	mq->hotspot.nr_top_levels = 8;
	mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
					   from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);

	q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
	q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);

	stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
	stats_init(&mq->cache_stats, NR_CACHE_LEVELS);

	if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
		goto bad_alloc_table;

	if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
		goto bad_alloc_hotspot_table;

	mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;

	mq->next_hotspot_period = jiffies;
	mq->next_cache_period = jiffies;

	mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */

	mq->migrations_allowed = migrations_allowed;
	mq->cleaner = cleaner;

	h_exit(&mq->hotspot_table);
bad_alloc_hotspot_table:
	free_bitset(mq->cache_hit_bits);
	free_bitset(mq->hotspot_hit_bits);
bad_hotspot_hit_bits:
	space_exit(&mq->es);

static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t cache_block_size)
	return __smq_create(cache_size, origin_size, cache_block_size,
			    false, true, false);

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
	return __smq_create(cache_size, origin_size, cache_block_size,

static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
					      sector_t origin_size,
					      sector_t cache_block_size)
	return __smq_create(cache_size, origin_size, cache_block_size,
			    false, false, true);

/*----------------------------------------------------------------*/

static struct dm_cache_policy_type smq_policy_type = {
	.version = {2, 0, 0},
	.owner = THIS_MODULE,
	.create = smq_create

static struct dm_cache_policy_type mq_policy_type = {
	.version = {2, 0, 0},
	.owner = THIS_MODULE,
	.create = mq_create,

static struct dm_cache_policy_type cleaner_policy_type = {
	.version = {2, 0, 0},
	.owner = THIS_MODULE,
	.create = cleaner_create,

static struct dm_cache_policy_type default_policy_type = {
	.version = {2, 0, 0},
	.owner = THIS_MODULE,
	.create = smq_create,
	.real = &smq_policy_type

static int __init smq_init(void)
	r = dm_cache_policy_register(&smq_policy_type);
		DMERR("register failed %d", r);

	r = dm_cache_policy_register(&mq_policy_type);
		DMERR("register failed (as mq) %d", r);

	r = dm_cache_policy_register(&cleaner_policy_type);
		DMERR("register failed (as cleaner) %d", r);

	r = dm_cache_policy_register(&default_policy_type);
		DMERR("register failed (as default) %d", r);

	dm_cache_policy_unregister(&cleaner_policy_type);
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&smq_policy_type);

static void __exit smq_exit(void)
	dm_cache_policy_unregister(&cleaner_policy_type);
	dm_cache_policy_unregister(&smq_policy_type);
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);

module_init(smq_init);
module_exit(smq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");

MODULE_ALIAS("dm-cache-default");
MODULE_ALIAS("dm-cache-mq");
MODULE_ALIAS("dm-cache-cleaner");