// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK 50
#define LOW_WATERMARK 45
#define MAX_WRITEBACK_JOBS min(0x10000000 / PAGE_SIZE, totalram_pages() / 16)
#define ENDIO_LATENCY 16
#define WRITEBACK_LATENCY 64
#define AUTOCOMMIT_BLOCKS_SSD 65536
#define AUTOCOMMIT_BLOCKS_PMEM 64
#define AUTOCOMMIT_MSEC 1000

#define BITMAP_GRANULARITY 65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src) ((dest) = (src))
#endif

#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC 0x23489321
#define MEMORY_SUPERBLOCK_VERSION 1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[0];
};

struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc) ((wc)->pmem_mode)
#define WC_MODE_FUA(wc) ((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc) false
#define WC_MODE_FUA(wc) false
#endif
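/*
 * In SSD mode the free entries are kept in an rbtree ordered by entry
 * address (which tracks the entry's offset on the cache device),
 * presumably so that allocations stay roughly sequential on the SSD;
 * pmem mode gets by with a plain FIFO list.
 */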
#define WC_MODE_SORT_FREELIST(wc) (!WC_MODE_PMEM(wc))

struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	sector_t data_device_sectors;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool start_sector_set:1;
	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;

	unsigned high_wm_percent_value;
	unsigned low_wm_percent_value;
	unsigned autocommit_time_value;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE 16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
		"A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
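/*
 * Map the whole cache device into the kernel address space via DAX.
 * dax_direct_access() is asked for the full range; if it returns one
 * virtually contiguous mapping we use it directly, otherwise the pages
 * are collected range by range and stitched together with vmap()
 * (wc->memory_vmapped records which case applies).
 */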
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;
	sector_t offset;

	wc->memory_vmapped = false;

	if (!wc->ssd_dev->dax_dev) {
		r = -EOPNOTSUPP;
		goto err1;
	}
	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	offset = get_start_sect(wc->ssd_dev->bdev);
	if (offset & (PAGE_SIZE / 512 - 1)) {
		r = -EINVAL;
		goto err1;
	}
	offset >>= PAGE_SHIFT - 9;

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	BUG();
	return -EOPNOTSUPP;
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
	       ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

#define writecache_error(wc, err, msg, arg...)			\
do {								\
	if (!cmpxchg(&(wc)->error, 0, err))			\
		DMERR(msg, ##arg);				\
	wake_up(&(wc)->freelist_wait);				\
} while (0)

#define writecache_has_error(wc) (unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}
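/*
 * SSD-mode metadata commit: walk wc->dirty_bitmap, write each run of
 * dirty BITMAP_GRANULARITY regions to the cache device with async
 * dm-io, wait for all the writes, optionally drain in-flight WRITE
 * bios, then issue a disk flush and clear the bitmap.
 */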
static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	if (wait_for_ios)
		writecache_wait_for_ios(wc, WRITE);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	if (WC_MODE_PMEM(wc))
		wmb();
	else
		ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING 1
#define WFE_LOWEST_SEQ 2
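/*
 * Find the cache entry for a given original (origin-device) sector.
 * WFE_RETURN_FOLLOWING turns a miss into "return the next entry above
 * the requested sector"; WFE_LOWEST_SEQ picks the duplicate with the
 * lowest sequence count when the same sector is cached more than once.
 */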
static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;
		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		list_del(&e->lru);
	}
	wc->freelist_size--;

	writecache_verify_watermark(wc);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}
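/*
 * Commit all uncommitted entries: flush their metadata (and their data
 * in pmem mode), publish an incremented superblock sequence count, and
 * finally free older committed entries that these writes superseded.
 */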
static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc, true);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
	writecache_commit_flushed(wc, false);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc, false);
}

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}
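/*
 * Remove all cache entries overlapping [start, end) on behalf of a
 * discard.  In-flight bios are drained before the first entry is freed,
 * so nothing can still be reading or writing the dropped blocks.
 */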
static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				writecache_wait_for_ios(wc, READ);
				writecache_wait_for_ios(wc, WRITE);
				discarded_something = true;
			}
			if (!writecache_entry_is_committed(wc, e))
				wc->uncommitted_blocks--;
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	drain_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);
	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
	}

	return 0;
}

static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
{
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = wc->start_sector;
	region.count = n_sectors;
	req.bi_op = REQ_OP_READ;
	req.bi_op_flags = REQ_SYNC;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	return dm_io(&req, 1, &region, NULL);
}

static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;

	if (WC_MODE_PMEM(wc)) {
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
	} else {
		r = writecache_read_metadata(wc, wc->metadata_sectors);
		if (r) {
			size_t sb_entries_offset;
			writecache_error(wc, r, "unable to read metadata: %d", r);
			sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
			memset((char *)wc->memory_map + sb_entries_offset, -1,
			       (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
		}
	}

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc, false);
	}

	writecache_verify_watermark(wc);

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}
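/*
 * Copy data between the bio's pages and a cache block in persistent
 * memory, one bvec at a time.  Reads go through memcpy_mcsafe() so a
 * hardware memory error becomes -EIO on the bio instead of a machine
 * crash; writes use memcpy_flushcache() to reach persistence.
 */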
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned long flags;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_irq(&bv, &flags);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = memcpy_mcsafe(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache(data, buf, size);
		}

		bvec_kunmap_irq(buf, &flags);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			generic_make_request(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}
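/*
 * Main bio dispatch.  Flushes and discards are handled inline in pmem
 * mode but offloaded to the flush thread in SSD mode.  Read hits are
 * served from the cache (copied from pmem, or remapped to the SSD);
 * misses go to the origin.  Writes grab a free cache entry, waiting on
 * the freelist if none is available, and are either copied into pmem
 * directly or remapped to the cache device.
 */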
static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			goto unlock_submit;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e))
					goto bio_copy;
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					goto bio_copy;
				}
			}
			e = writecache_pop_from_freelist(wc);
			if (unlikely(!e)) {
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(bio->bi_opf & REQ_FUA ||
			     wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}

static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private != NULL) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}

static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}
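/*
 * Drain one batch of completed pmem writeback bios under wc->lock:
 * mark each entry as no longer in progress and return it to the
 * freelist, committing and briefly dropping the lock every
 * ENDIO_LATENCY entries so the lock is not held for too long.
 */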
static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					 "write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc, false);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();
		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc, false);

		wc_unlock(wc);
	}

	return 0;
}

static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);

	if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
		return true;

	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}
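/*
 * Pmem writeback: pop runs of entries that are contiguous on the origin
 * (pre-counted in wc_list_contiguous), build one bio per run out of the
 * entries' persistent-memory pages, and submit it to the origin device.
 */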
static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else if (unlikely(!bio_sectors(bio))) {
			bio->bi_status = BLK_STS_OK;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}
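/*
 * SSD writeback: the cached data is not in the kernel address space, so
 * each contiguous run is handed to dm-kcopyd to be copied from the
 * cache device to the origin device.
 */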
static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
			if (to.sector >= wc->data_device_sectors) {
				writecache_copy_endio(0, 0, c);
				continue;
			}
			from.count = to.count = wc->data_device_sectors - to.sector;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}
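/*
 * Writeback worker: while the freelist is below the low watermark (or a
 * flush message set writeback_all), take the oldest committed entries
 * off the LRU, grow each batch with origin-contiguous neighbours found
 * through the rbtree, and feed the batches to the pmem or SSD writer.
 */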
static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *f, *g, *e = NULL;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark)) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
			queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		if (unlikely(wc->writeback_all)) {
			if (unlikely(!e)) {
				writecache_flush(wc);
				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
			} else
				e = g;
		} else
			e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_del(&e->lru);
				list_add(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_del(&e->lru);
		list_add(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
				     read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_del(&g->lru);
			list_add(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_PAGES;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
				if (unlikely(wc->writeback_all)) {
					next_node = rb_next(&f->rb_node);
					if (likely(next_node))
						g = container_of(next_node, struct wc_entry, rb_node);
				}
				break;
			}
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't make any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}
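/*
 * Split the cache device between metadata and data.  Each cached block
 * costs block_size bytes of data plus one wc_memory_entry of metadata,
 * so start from device_size / (block_size + sizeof(entry)) and shrink
 * n_blocks until the superblock plus entry array, rounded up to a block
 * boundary, plus the data area actually fits on the device.
 */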
static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify the following entries[n_blocks] won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check if the bit field overflows */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}
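/*
 * Format a fresh cache: write an all-invalid entry array and a zeroed
 * superblock, commit everything, and write the magic number last, so a
 * crash in the middle of formatting leaves no valid superblock behind.
 */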
static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++) {
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
		cond_resched();
	}

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc, false);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc, false);

	return 0;
}

static void writecache_dtr(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;

	if (!wc)
		return;

	if (wc->endio_thread)
		kthread_stop(wc->endio_thread);

	if (wc->flush_thread)
		kthread_stop(wc->flush_thread);

	bioset_exit(&wc->bio_set);

	mempool_exit(&wc->copy_pool);

	if (wc->writeback_wq)
		destroy_workqueue(wc->writeback_wq);

	if (wc->dev)
		dm_put_device(ti, wc->dev);

	if (wc->ssd_dev)
		dm_put_device(ti, wc->ssd_dev);

	if (wc->memory_map) {
		if (WC_MODE_PMEM(wc))
			persistent_memory_release(wc);
		else
			vfree(wc->memory_map);
	}

	if (wc->entries)
		vfree(wc->entries);

	if (wc->dm_kcopyd)
		dm_kcopyd_client_destroy(wc->dm_kcopyd);

	if (wc->dm_io)
		dm_io_client_destroy(wc->dm_io);

	if (wc->dirty_bitmap)
		vfree(wc->dirty_bitmap);

	kfree(wc);
}
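/*
 * Table line:
 *	writecache <p|s> <origin dev> <cache dev> <block size>
 *		   <#optional args> [<optional arg>...]
 * Optional args: start_sector, high_watermark, low_watermark,
 * writeback_jobs, autocommit_blocks, autocommit_time and fua/nofua
 * (the last two are valid in pmem mode only).
 */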
static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_writecache *wc;
	struct dm_arg_set as;
	const char *string;
	unsigned opt_params;
	size_t offset, data_size;
	int i, r;
	char dummy;
	int high_wm_percent = HIGH_WATERMARK;
	int low_wm_percent = LOW_WATERMARK;
	uint64_t x;
	struct wc_memory_superblock s;

	static struct dm_arg _args[] = {
		{0, 16, "Invalid number of feature args"},
	};

	as.argc = argc;
	as.argv = argv;

	wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
	if (!wc) {
		ti->error = "Cannot allocate writecache structure";
		r = -ENOMEM;
		goto bad;
	}
	ti->private = wc;
	wc->ti = ti;

	mutex_init(&wc->lock);
	writecache_poison_lists(wc);
	init_waitqueue_head(&wc->freelist_wait);
	timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);

	for (i = 0; i < 2; i++) {
		atomic_set(&wc->bio_in_progress[i], 0);
		init_waitqueue_head(&wc->bio_in_progress_wait[i]);
	}

	wc->dm_io = dm_io_client_create();
	if (IS_ERR(wc->dm_io)) {
		r = PTR_ERR(wc->dm_io);
		ti->error = "Unable to allocate dm-io client";
		wc->dm_io = NULL;
		goto bad;
	}

	wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
	if (!wc->writeback_wq) {
		r = -ENOMEM;
		ti->error = "Could not allocate writeback workqueue";
		goto bad;
	}
	INIT_WORK(&wc->writeback_work, writecache_writeback);
	INIT_WORK(&wc->flush_work, writecache_flush_work);

	raw_spin_lock_init(&wc->endio_list_lock);
	INIT_LIST_HEAD(&wc->endio_list);
	wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
	if (IS_ERR(wc->endio_thread)) {
		r = PTR_ERR(wc->endio_thread);
		wc->endio_thread = NULL;
		ti->error = "Couldn't spawn endio thread";
		goto bad;
	}
	wake_up_process(wc->endio_thread);

	/*
	 * Parse the mode (pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	if (!strcasecmp(string, "s")) {
		wc->pmem_mode = false;
	} else if (!strcasecmp(string, "p")) {
#ifdef DM_WRITECACHE_HAS_PMEM
		wc->pmem_mode = true;
		wc->writeback_fua = true;
#else
		/*
		 * If the architecture doesn't support persistent memory or
		 * the kernel doesn't support any DAX drivers, this driver can
		 * only be used in SSD-only mode.
		 */
		r = -EOPNOTSUPP;
		ti->error = "Persistent memory or DAX not supported on this system";
		goto bad;
#endif
	} else {
		goto bad_arguments;
	}

	if (WC_MODE_PMEM(wc)) {
		r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
				offsetof(struct writeback_struct, bio),
				BIOSET_NEED_BVECS);
		if (r) {
			ti->error = "Could not allocate bio set";
			goto bad;
		}
	} else {
		r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
		if (r) {
			ti->error = "Could not allocate mempool";
			goto bad;
		}
	}

	/*
	 * Parse the origin data device
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
	if (r) {
		ti->error = "Origin data device lookup failed";
		goto bad;
	}

	/*
	 * Parse cache data device (be it pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
	if (r) {
		ti->error = "Cache data device lookup failed";
		goto bad;
	}
	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);

	/*
	 * Parse the cache block size
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
	    wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
	    (wc->block_size & (wc->block_size - 1))) {
		r = -EINVAL;
		ti->error = "Invalid block size";
		goto bad;
	}
	if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
	    wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
		r = -EINVAL;
		ti->error = "Block size is smaller than device logical block size";
		goto bad;
	}
	wc->block_size_bits = __ffs(wc->block_size);

	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
	wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
	wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);

	/*
	 * Parse optional arguments
	 */
	r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (r)
		goto bad;

	while (opt_params) {
		string = dm_shift_arg(&as), opt_params--;
		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
			unsigned long long start_sector;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
				goto invalid_optional;
			wc->start_sector = start_sector;
			wc->start_sector_set = true;
			if (wc->start_sector != start_sector ||
			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
				goto invalid_optional;
		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (high_wm_percent < 0 || high_wm_percent > 100)
				goto invalid_optional;
			wc->high_wm_percent_value = high_wm_percent;
			wc->high_wm_percent_set = true;
		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (low_wm_percent < 0 || low_wm_percent > 100)
				goto invalid_optional;
			wc->low_wm_percent_value = low_wm_percent;
			wc->low_wm_percent_set = true;
		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
				goto invalid_optional;
			wc->max_writeback_jobs_set = true;
		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
				goto invalid_optional;
			wc->autocommit_blocks_set = true;
		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
			unsigned autocommit_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
				goto invalid_optional;
			if (autocommit_msecs > 3600000)
				goto invalid_optional;
			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
			wc->autocommit_time_value = autocommit_msecs;
			wc->autocommit_time_set = true;
		} else if (!strcasecmp(string, "fua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = true;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "nofua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = false;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else {
invalid_optional:
			r = -EINVAL;
			ti->error = "Invalid optional argument";
			goto bad;
		}
	}

	if (high_wm_percent < low_wm_percent) {
		r = -EINVAL;
		ti->error = "High watermark must be greater than or equal to low watermark";
		goto bad;
	}

	if (WC_MODE_PMEM(wc)) {
		if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
			r = -EOPNOTSUPP;
			ti->error = "Asynchronous persistent memory not supported as pmem cache";
			goto bad;
		}

		r = persistent_memory_claim(wc);
		if (r) {
			ti->error = "Unable to map persistent memory for cache";
			goto bad;
		}
	} else {
		size_t n_blocks, n_metadata_blocks;
		uint64_t n_bitmap_bits;

		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;

		bio_list_init(&wc->flush_list);
		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
		if (IS_ERR(wc->flush_thread)) {
			r = PTR_ERR(wc->flush_thread);
			wc->flush_thread = NULL;
			ti->error = "Couldn't spawn flush thread";
			goto bad;
		}
		wake_up_process(wc->flush_thread);

		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
					  &n_blocks, &n_metadata_blocks);
		if (r) {
			ti->error = "Invalid device size";
			goto bad;
		}

		n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
				 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
		/* this is a limitation of the test_bit functions */
		if (n_bitmap_bits > 1U << 31) {
			r = -EFBIG;
			ti->error = "Invalid device size";
			goto bad;
		}

		wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
		if (!wc->memory_map) {
			r = -ENOMEM;
			ti->error = "Unable to allocate memory for metadata";
			goto bad;
		}

		wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
		if (IS_ERR(wc->dm_kcopyd)) {
			r = PTR_ERR(wc->dm_kcopyd);
			ti->error = "Unable to allocate dm-kcopyd client";
			wc->dm_kcopyd = NULL;
			goto bad;
		}

		wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
		wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
			BITS_PER_LONG * sizeof(unsigned long);
		wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
		if (!wc->dirty_bitmap) {
			r = -ENOMEM;
			ti->error = "Unable to allocate dirty bitmap";
			goto bad;
		}

		r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
		if (r) {
			ti->error = "Unable to read first block of metadata";
			goto bad;
		}
	}

	r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
	if (r) {
		ti->error = "Hardware memory error when reading superblock";
		goto bad;
	}
	if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
		r = init_memory(wc);
		if (r) {
			ti->error = "Unable to initialize device";
			goto bad;
		}
		r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
		if (r) {
			ti->error = "Hardware memory error when reading superblock";
			goto bad;
		}
	}

	if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
		ti->error = "Invalid magic in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
		ti->error = "Invalid version in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.block_size) != wc->block_size) {
		ti->error = "Block size does not match superblock";
		r = -EINVAL;
		goto bad;
	}

	wc->n_blocks = le64_to_cpu(s.n_blocks);

	offset = wc->n_blocks * sizeof(struct wc_memory_entry);
	if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
overflow:
		ti->error = "Overflow in size calculation";
		r = -EINVAL;
		goto bad;
	}
	offset += sizeof(struct wc_memory_superblock);
	if (offset < sizeof(struct wc_memory_superblock))
		goto overflow;
	offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
	data_size = wc->n_blocks * (size_t)wc->block_size;
	if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
	    (offset + data_size < offset))
		goto overflow;
	if (offset + data_size > wc->memory_map_size) {
		ti->error = "Memory area is too small";
		r = -EINVAL;
		goto bad;
	}

	wc->metadata_sectors = offset >> SECTOR_SHIFT;
	wc->block_start = (char *)sb(wc) + offset;

	x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_high_watermark = x;
	x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_low_watermark = x;

	r = writecache_alloc_entries(wc);
	if (r) {
		ti->error = "Cannot allocate memory";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	return 0;

bad_arguments:
	r = -EINVAL;
	ti->error = "Bad arguments";
bad:
	writecache_dtr(ti);
	return r;
}

static void writecache_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_writecache *wc = ti->private;
	unsigned extra_args;
	unsigned sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
		       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
		       (unsigned long long)wc->writeback_size);
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
		       wc->dev->name, wc->ssd_dev->name, wc->block_size);
		extra_args = 0;
		if (wc->start_sector_set)
			extra_args += 2;
		if (wc->high_wm_percent_set)
			extra_args += 2;
		if (wc->low_wm_percent_set)
			extra_args += 2;
		if (wc->max_writeback_jobs_set)
			extra_args += 2;
		if (wc->autocommit_blocks_set)
			extra_args += 2;
		if (wc->autocommit_time_set)
			extra_args += 2;
		if (wc->writeback_fua_set)
			extra_args++;

		DMEMIT("%u", extra_args);
		if (wc->start_sector_set)
			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
		if (wc->high_wm_percent_set)
			DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
		if (wc->low_wm_percent_set)
			DMEMIT(" low_watermark %u", wc->low_wm_percent_value);
		if (wc->max_writeback_jobs_set)
			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
		if (wc->autocommit_blocks_set)
			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
		if (wc->autocommit_time_set)
			DMEMIT(" autocommit_time %u", wc->autocommit_time_value);
		if (wc->writeback_fua_set)
			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
		break;
	}
}

static struct target_type writecache_target = {
	.name			= "writecache",
	.version		= {1, 1, 1},
	.module			= THIS_MODULE,
	.ctr			= writecache_ctr,
	.dtr			= writecache_dtr,
	.status			= writecache_status,
	.postsuspend		= writecache_suspend,
	.resume			= writecache_resume,
	.message		= writecache_message,
	.map			= writecache_map,
	.end_io			= writecache_end_io,
	.iterate_devices	= writecache_iterate_devices,
	.io_hints		= writecache_io_hints,
};

static int __init dm_writecache_init(void)
{
	int r;

	r = dm_register_target(&writecache_target);
	if (r) {
		DMERR("register failed %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_writecache_exit(void)
{
	dm_unregister_target(&writecache_target);
}

module_init(dm_writecache_init);
module_exit(dm_writecache_exit);

MODULE_DESCRIPTION(DM_NAME " writecache target");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");