GNU Linux-libre 5.10.153-gnu1
drivers/md/dm-writecache.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK                  50
#define LOW_WATERMARK                   45
#define MAX_WRITEBACK_JOBS              min(0x10000000 / PAGE_SIZE, totalram_pages() / 16)
#define ENDIO_LATENCY                   16
#define WRITEBACK_LATENCY               64
#define AUTOCOMMIT_BLOCKS_SSD           65536
#define AUTOCOMMIT_BLOCKS_PMEM          64
#define AUTOCOMMIT_MSEC                 1000
#define MAX_AGE_DIV                     16
#define MAX_AGE_UNSPECIFIED             -1UL

#define BITMAP_GRANULARITY      65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY      PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

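/*
 * pmem_assign() stores a value into persistent memory: with PMEM support
 * the store goes through memcpy_flushcache() (via a local temporary, so
 * the source expression is evaluated exactly once), ensuring the data is
 * flushed from the CPU cache; without PMEM support it is a plain
 * assignment.
 */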
#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)                                  \
do {                                                            \
        typeof(dest) uniq = (src);                              \
        memcpy_flushcache(&(dest), &uniq, sizeof(dest));        \
} while (0)
#else
#define pmem_assign(dest, src)  ((dest) = (src))
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC         0x23489321
#define MEMORY_SUPERBLOCK_VERSION       1

struct wc_memory_entry {
        __le64 original_sector;
        __le64 seq_count;
};

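/*
 * On-media metadata layout: a fixed-size superblock (the union with
 * padding[8] pins it at 64 bytes), immediately followed by one
 * wc_memory_entry per cache block.
 */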
struct wc_memory_superblock {
        union {
                struct {
                        __le32 magic;
                        __le32 version;
                        __le32 block_size;
                        __le32 pad;
                        __le64 n_blocks;
                        __le64 seq_count;
                };
                __le64 padding[8];
        };
        struct wc_memory_entry entries[0];
};

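/*
 * In-core descriptor of one cache block.  On 64-bit kernels,
 * write_in_progress and index are packed into a single word using
 * bit-fields.  When hardware memory errors are handled, original_sector
 * and seq_count are shadowed here in normal RAM, so lookups need not read
 * possibly poisoned persistent memory.
 */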
struct wc_entry {
        struct rb_node rb_node;
        struct list_head lru;
        unsigned short wc_list_contiguous;
        bool write_in_progress
#if BITS_PER_LONG == 64
                :1
#endif
        ;
        unsigned long index
#if BITS_PER_LONG == 64
                :47
#endif
        ;
        unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        uint64_t original_sector;
        uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)                        ((wc)->pmem_mode)
#define WC_MODE_FUA(wc)                         ((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)                        false
#define WC_MODE_FUA(wc)                         false
#endif
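/*
 * In SSD mode the freelist is kept sorted (an rb-tree ordered by entry
 * address, and therefore by cache sector) so that physically consecutive
 * cache blocks can be handed out for one bio; in PMEM mode a plain list
 * suffices.
 */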
#define WC_MODE_SORT_FREELIST(wc)               (!WC_MODE_PMEM(wc))

struct dm_writecache {
        struct mutex lock;
        struct list_head lru;
        union {
                struct list_head freelist;
                struct {
                        struct rb_root freetree;
                        struct wc_entry *current_free;
                };
        };
        struct rb_root tree;

        size_t freelist_size;
        size_t writeback_size;
        size_t freelist_high_watermark;
        size_t freelist_low_watermark;
        unsigned long max_age;

        unsigned uncommitted_blocks;
        unsigned autocommit_blocks;
        unsigned max_writeback_jobs;

        int error;

        unsigned long autocommit_jiffies;
        struct timer_list autocommit_timer;
        struct wait_queue_head freelist_wait;

        struct timer_list max_age_timer;

        atomic_t bio_in_progress[2];
        struct wait_queue_head bio_in_progress_wait[2];

        struct dm_target *ti;
        struct dm_dev *dev;
        struct dm_dev *ssd_dev;
        sector_t start_sector;
        void *memory_map;
        uint64_t memory_map_size;
        size_t metadata_sectors;
        size_t n_blocks;
        uint64_t seq_count;
        sector_t data_device_sectors;
        void *block_start;
        struct wc_entry *entries;
        unsigned block_size;
        unsigned char block_size_bits;

        bool pmem_mode:1;
        bool writeback_fua:1;

        bool overwrote_committed:1;
        bool memory_vmapped:1;

        bool start_sector_set:1;
        bool high_wm_percent_set:1;
        bool low_wm_percent_set:1;
        bool max_writeback_jobs_set:1;
        bool autocommit_blocks_set:1;
        bool autocommit_time_set:1;
        bool max_age_set:1;
        bool writeback_fua_set:1;
        bool flush_on_suspend:1;
        bool cleaner:1;
        bool cleaner_set:1;

        unsigned high_wm_percent_value;
        unsigned low_wm_percent_value;
        unsigned autocommit_time_value;
        unsigned max_age_value;

        unsigned writeback_all;
        struct workqueue_struct *writeback_wq;
        struct work_struct writeback_work;
        struct work_struct flush_work;

        struct dm_io_client *dm_io;

        raw_spinlock_t endio_list_lock;
        struct list_head endio_list;
        struct task_struct *endio_thread;

        struct task_struct *flush_thread;
        struct bio_list flush_list;

        struct dm_kcopyd_client *dm_kcopyd;
        unsigned long *dirty_bitmap;
        unsigned dirty_bitmap_size;

        struct bio_set bio_set;
        mempool_t copy_pool;
};

#define WB_LIST_INLINE          16

struct writeback_struct {
        struct list_head endio_entry;
        struct dm_writecache *wc;
        struct wc_entry **wc_list;
        unsigned wc_list_n;
        struct wc_entry *wc_list_inline[WB_LIST_INLINE];
        struct bio bio;
};

struct copy_struct {
        struct list_head endio_entry;
        struct dm_writecache *wc;
        struct wc_entry *e;
        unsigned n_entries;
        int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
                                            "A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
        mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
        mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
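/*
 * Map the cache device via DAX.  If dax_direct_access() covers the whole
 * range with one contiguous mapping it is used directly; otherwise the
 * individual pages are collected and stitched together with vmap().
 */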
static int persistent_memory_claim(struct dm_writecache *wc)
{
        int r;
        loff_t s;
        long p, da;
        pfn_t pfn;
        int id;
        struct page **pages;
        sector_t offset;

        wc->memory_vmapped = false;

        s = wc->memory_map_size;
        p = s >> PAGE_SHIFT;
        if (!p) {
                r = -EINVAL;
                goto err1;
        }
        if (p != s >> PAGE_SHIFT) {
                r = -EOVERFLOW;
                goto err1;
        }

        offset = get_start_sect(wc->ssd_dev->bdev);
        if (offset & (PAGE_SIZE / 512 - 1)) {
                r = -EINVAL;
                goto err1;
        }
        offset >>= PAGE_SHIFT - 9;

        id = dax_read_lock();

        da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
        if (da < 0) {
                wc->memory_map = NULL;
                r = da;
                goto err2;
        }
        if (!pfn_t_has_page(pfn)) {
                wc->memory_map = NULL;
                r = -EOPNOTSUPP;
                goto err2;
        }
        if (da != p) {
                long i;
                wc->memory_map = NULL;
                pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
                if (!pages) {
                        r = -ENOMEM;
                        goto err2;
                }
                i = 0;
                do {
                        long daa;
                        daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
                                                NULL, &pfn);
                        if (daa <= 0) {
                                r = daa ? daa : -EINVAL;
                                goto err3;
                        }
                        if (!pfn_t_has_page(pfn)) {
                                r = -EOPNOTSUPP;
                                goto err3;
                        }
                        while (daa-- && i < p) {
                                pages[i++] = pfn_t_to_page(pfn);
                                pfn.val++;
                                if (!(i & 15))
                                        cond_resched();
                        }
                } while (i < p);
                wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
                if (!wc->memory_map) {
                        r = -ENOMEM;
                        goto err3;
                }
                kvfree(pages);
                wc->memory_vmapped = true;
        }

        dax_read_unlock(id);

        wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
        wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

        return 0;
err3:
        kvfree(pages);
err2:
        dax_read_unlock(id);
err1:
        return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
        return -EOPNOTSUPP;
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
        if (wc->memory_vmapped)
                vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        else
                return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
        return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
        if (is_vmalloc_addr(ptr))
                flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
        if (is_vmalloc_addr(ptr))
                invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
        return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
        return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
        return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
        return wc->start_sector + wc->metadata_sectors +
                ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        return e->original_sector;
#else
        return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        return e->seq_count;
#else
        return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        e->seq_count = -1;
#endif
        pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
                                            uint64_t original_sector, uint64_t seq_count)
{
        struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        e->original_sector = original_sector;
        e->seq_count = seq_count;
#endif
        me.original_sector = cpu_to_le64(original_sector);
        me.seq_count = cpu_to_le64(seq_count);
        pmem_assign(*memory_entry(wc, e), me);
}

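/*
 * Record the first error; later errors are ignored (the cmpxchg only
 * stores into a zero ->error).  Waiters on the freelist are woken so they
 * can notice the error.
 */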
#define writecache_error(wc, err, msg, arg...)                          \
do {                                                                    \
        if (!cmpxchg(&(wc)->error, 0, err))                             \
                DMERR(msg, ##arg);                                      \
        wake_up(&(wc)->freelist_wait);                                  \
} while (0)

#define writecache_has_error(wc)        (unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
        if (!WC_MODE_PMEM(wc))
                memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
        if (!WC_MODE_PMEM(wc))
                __set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
                          wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
        struct dm_writecache *wc;
        struct completion c;
        atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
        struct io_notify *endio = context;

        if (unlikely(error != 0))
                writecache_error(endio->wc, -EIO, "error writing metadata");
        BUG_ON(atomic_read(&endio->count) <= 0);
        if (atomic_dec_and_test(&endio->count))
                complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
        wait_event(wc->bio_in_progress_wait[direction],
                   !atomic_read(&wc->bio_in_progress[direction]));
}

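/*
 * Write back every metadata region marked in the dirty bitmap.  Runs of
 * adjacent dirty bits are merged into single dm-io writes; after all of
 * them complete, the SSD is flushed and the bitmap is cleared.
 */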
static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
        struct dm_io_region region;
        struct dm_io_request req;
        struct io_notify endio = {
                wc,
                COMPLETION_INITIALIZER_ONSTACK(endio.c),
                ATOMIC_INIT(1),
        };
        unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
        unsigned i = 0;

        while (1) {
                unsigned j;
                i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
                if (unlikely(i == bitmap_bits))
                        break;
                j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

                region.bdev = wc->ssd_dev->bdev;
                region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
                region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

                if (unlikely(region.sector >= wc->metadata_sectors))
                        break;
                if (unlikely(region.sector + region.count > wc->metadata_sectors))
                        region.count = wc->metadata_sectors - region.sector;

                region.sector += wc->start_sector;
                atomic_inc(&endio.count);
                req.bi_op = REQ_OP_WRITE;
                req.bi_op_flags = REQ_SYNC;
                req.mem.type = DM_IO_VMA;
                req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
                req.client = wc->dm_io;
                req.notify.fn = writecache_notify_io;
                req.notify.context = &endio;

                /* writing via async dm-io (implied by notify.fn above) won't return an error */
                (void) dm_io(&req, 1, &region, NULL);
                i = j;
        }

        writecache_notify_io(0, &endio);
        wait_for_completion_io(&endio.c);

        if (wait_for_ios)
                writecache_wait_for_ios(wc, WRITE);

        writecache_disk_flush(wc, wc->ssd_dev);

        memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void ssd_commit_superblock(struct dm_writecache *wc)
{
        int r;
        struct dm_io_region region;
        struct dm_io_request req;

        region.bdev = wc->ssd_dev->bdev;
        region.sector = 0;
        region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT;

        if (unlikely(region.sector + region.count > wc->metadata_sectors))
                region.count = wc->metadata_sectors - region.sector;

        region.sector += wc->start_sector;

        req.bi_op = REQ_OP_WRITE;
        req.bi_op_flags = REQ_SYNC | REQ_FUA;
        req.mem.type = DM_IO_VMA;
        req.mem.ptr.vma = (char *)wc->memory_map;
        req.client = wc->dm_io;
        req.notify.fn = NULL;
        req.notify.context = NULL;

        r = dm_io(&req, 1, &region, NULL);
        if (unlikely(r))
                writecache_error(wc, r, "error writing superblock");
}

static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
        if (WC_MODE_PMEM(wc))
                pmem_wmb();
        else
                ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
        int r;
        struct dm_io_region region;
        struct dm_io_request req;

        region.bdev = dev->bdev;
        region.sector = 0;
        region.count = 0;
        req.bi_op = REQ_OP_WRITE;
        req.bi_op_flags = REQ_PREFLUSH;
        req.mem.type = DM_IO_KMEM;
        req.mem.ptr.addr = NULL;
        req.client = wc->dm_io;
        req.notify.fn = NULL;

        r = dm_io(&req, 1, &region, NULL);
        if (unlikely(r))
                writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING    1
#define WFE_LOWEST_SEQ          2

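/*
 * Look up the entry for an original device sector.  Multiple entries with
 * the same original sector can coexist (distinguished by seq_count):
 * WFE_LOWEST_SEQ selects the oldest, otherwise the newest is returned.
 * With WFE_RETURN_FOLLOWING, a miss returns the entry with the next
 * higher original sector instead of NULL.
 */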
static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
                                              uint64_t block, int flags)
{
        struct wc_entry *e;
        struct rb_node *node = wc->tree.rb_node;

        if (unlikely(!node))
                return NULL;

        while (1) {
                e = container_of(node, struct wc_entry, rb_node);
                if (read_original_sector(wc, e) == block)
                        break;

                node = (read_original_sector(wc, e) >= block ?
                        e->rb_node.rb_left : e->rb_node.rb_right);
                if (unlikely(!node)) {
                        if (!(flags & WFE_RETURN_FOLLOWING))
                                return NULL;
                        if (read_original_sector(wc, e) >= block) {
                                return e;
                        } else {
                                node = rb_next(&e->rb_node);
                                if (unlikely(!node))
                                        return NULL;
                                e = container_of(node, struct wc_entry, rb_node);
                                return e;
                        }
                }
        }

        while (1) {
                struct wc_entry *e2;
                if (flags & WFE_LOWEST_SEQ)
                        node = rb_prev(&e->rb_node);
                else
                        node = rb_next(&e->rb_node);
                if (unlikely(!node))
                        return e;
                e2 = container_of(node, struct wc_entry, rb_node);
                if (read_original_sector(wc, e2) != block)
                        return e;
                e = e2;
        }
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
        struct wc_entry *e;
        struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

        while (*node) {
                e = container_of(*node, struct wc_entry, rb_node);
                parent = &e->rb_node;
                if (read_original_sector(wc, e) > read_original_sector(wc, ins))
                        node = &parent->rb_left;
                else
                        node = &parent->rb_right;
        }
        rb_link_node(&ins->rb_node, parent, node);
        rb_insert_color(&ins->rb_node, &wc->tree);
        list_add(&ins->lru, &wc->lru);
        ins->age = jiffies;
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
        list_del(&e->lru);
        rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
        if (WC_MODE_SORT_FREELIST(wc)) {
                struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
                if (unlikely(!*node))
                        wc->current_free = e;
                while (*node) {
                        parent = *node;
                        if (&e->rb_node < *node)
                                node = &parent->rb_left;
                        else
                                node = &parent->rb_right;
                }
                rb_link_node(&e->rb_node, parent, node);
                rb_insert_color(&e->rb_node, &wc->freetree);
        } else {
                list_add_tail(&e->lru, &wc->freelist);
        }
        wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
        if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
                queue_work(wc->writeback_wq, &wc->writeback_work);
}

static void writecache_max_age_timer(struct timer_list *t)
{
        struct dm_writecache *wc = from_timer(wc, t, max_age_timer);

        if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
                queue_work(wc->writeback_wq, &wc->writeback_work);
                mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
        }
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
        struct wc_entry *e;

        if (WC_MODE_SORT_FREELIST(wc)) {
                struct rb_node *next;
                if (unlikely(!wc->current_free))
                        return NULL;
                e = wc->current_free;
                if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
                        return NULL;
                next = rb_next(&e->rb_node);
                rb_erase(&e->rb_node, &wc->freetree);
                if (unlikely(!next))
                        next = rb_first(&wc->freetree);
                wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
        } else {
                if (unlikely(list_empty(&wc->freelist)))
                        return NULL;
                e = container_of(wc->freelist.next, struct wc_entry, lru);
                if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
                        return NULL;
                list_del(&e->lru);
        }
        wc->freelist_size--;

        writecache_verify_watermark(wc);

        return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
        writecache_unlink(wc, e);
        writecache_add_to_freelist(wc, e);
        clear_seq_count(wc, e);
        writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
        if (unlikely(waitqueue_active(&wc->freelist_wait)))
                wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
        wc_unlock(wc);
        io_schedule();
        finish_wait(&wc->freelist_wait, &wait);
        wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
        /*
         * Catch incorrect access to these values while the device is suspended.
         */
        memset(&wc->tree, -1, sizeof wc->tree);
        wc->lru.next = LIST_POISON1;
        wc->lru.prev = LIST_POISON2;
        wc->freelist.next = LIST_POISON1;
        wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
        writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
        if (WC_MODE_PMEM(wc))
                writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
        return read_seq_count(wc, e) < wc->seq_count;
}

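/*
 * Commit everything that is not yet committed: flush the metadata (and,
 * on PMEM, the data) of all uncommitted entries, then bump the
 * superblock's seq_count.  Since an entry counts as committed when its
 * seq_count is below wc->seq_count, the bump commits all of them at once;
 * superseded older entries for the same sectors are freed afterwards.
 */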
static void writecache_flush(struct dm_writecache *wc)
{
        struct wc_entry *e, *e2;
        bool need_flush_after_free;

        wc->uncommitted_blocks = 0;
        del_timer(&wc->autocommit_timer);

        if (list_empty(&wc->lru))
                return;

        e = container_of(wc->lru.next, struct wc_entry, lru);
        if (writecache_entry_is_committed(wc, e)) {
                if (wc->overwrote_committed) {
                        writecache_wait_for_ios(wc, WRITE);
                        writecache_disk_flush(wc, wc->ssd_dev);
                        wc->overwrote_committed = false;
                }
                return;
        }
        while (1) {
                writecache_flush_entry(wc, e);
                if (unlikely(e->lru.next == &wc->lru))
                        break;
                e2 = container_of(e->lru.next, struct wc_entry, lru);
                if (writecache_entry_is_committed(wc, e2))
                        break;
                e = e2;
                cond_resched();
        }
        writecache_commit_flushed(wc, true);

        wc->seq_count++;
        pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
        if (WC_MODE_PMEM(wc))
                writecache_commit_flushed(wc, false);
        else
                ssd_commit_superblock(wc);

        wc->overwrote_committed = false;

        need_flush_after_free = false;
        while (1) {
                /* Free another committed entry with lower seq-count */
                struct rb_node *rb_node = rb_prev(&e->rb_node);

                if (rb_node) {
                        e2 = container_of(rb_node, struct wc_entry, rb_node);
                        if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
                            likely(!e2->write_in_progress)) {
                                writecache_free_entry(wc, e2);
                                need_flush_after_free = true;
                        }
                }
                if (unlikely(e->lru.prev == &wc->lru))
                        break;
                e = container_of(e->lru.prev, struct wc_entry, lru);
                cond_resched();
        }

        if (need_flush_after_free)
                writecache_commit_flushed(wc, false);
}

static void writecache_flush_work(struct work_struct *work)
{
        struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

        wc_lock(wc);
        writecache_flush(wc);
        wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
        struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
        if (!writecache_has_error(wc))
                queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
        if (!timer_pending(&wc->autocommit_timer))
                mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
        struct wc_entry *e;
        bool discarded_something = false;

        e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
        if (unlikely(!e))
                return;

        while (read_original_sector(wc, e) < end) {
                struct rb_node *node = rb_next(&e->rb_node);

                if (likely(!e->write_in_progress)) {
                        if (!discarded_something) {
                                if (!WC_MODE_PMEM(wc)) {
                                        writecache_wait_for_ios(wc, READ);
                                        writecache_wait_for_ios(wc, WRITE);
                                }
                                discarded_something = true;
                        }
                        if (!writecache_entry_is_committed(wc, e))
                                wc->uncommitted_blocks--;
                        writecache_free_entry(wc, e);
                }

                if (unlikely(!node))
                        break;

                e = container_of(node, struct wc_entry, rb_node);
        }

        if (discarded_something)
                writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
        if (wc->writeback_size) {
                writecache_wait_on_freelist(wc);
                return true;
        }
        return false;
}

static void writecache_suspend(struct dm_target *ti)
{
        struct dm_writecache *wc = ti->private;
        bool flush_on_suspend;

        del_timer_sync(&wc->autocommit_timer);
        del_timer_sync(&wc->max_age_timer);

        wc_lock(wc);
        writecache_flush(wc);
        flush_on_suspend = wc->flush_on_suspend;
        if (flush_on_suspend) {
                wc->flush_on_suspend = false;
                wc->writeback_all++;
                queue_work(wc->writeback_wq, &wc->writeback_work);
        }
        wc_unlock(wc);

        drain_workqueue(wc->writeback_wq);

        wc_lock(wc);
        if (flush_on_suspend)
                wc->writeback_all--;
        while (writecache_wait_for_writeback(wc));

        if (WC_MODE_PMEM(wc))
                persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

        writecache_poison_lists(wc);

        wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
        size_t b;

        if (wc->entries)
                return 0;
        wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
        if (!wc->entries)
                return -ENOMEM;
        for (b = 0; b < wc->n_blocks; b++) {
                struct wc_entry *e = &wc->entries[b];
                e->index = b;
                e->write_in_progress = false;
                cond_resched();
        }

        return 0;
}

static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
{
        struct dm_io_region region;
        struct dm_io_request req;

        region.bdev = wc->ssd_dev->bdev;
        region.sector = wc->start_sector;
        region.count = n_sectors;
        req.bi_op = REQ_OP_READ;
        req.bi_op_flags = REQ_SYNC;
        req.mem.type = DM_IO_VMA;
        req.mem.ptr.vma = (char *)wc->memory_map;
        req.client = wc->dm_io;
        req.notify.fn = NULL;

        return dm_io(&req, 1, &region, NULL);
}

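/*
 * Rebuild the in-core rb-tree and freelist from the on-media metadata.
 * Uncommitted entries are wiped; if two committed entries map the same
 * original sector, the one with the higher seq_count wins.
 */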
static void writecache_resume(struct dm_target *ti)
{
        struct dm_writecache *wc = ti->private;
        size_t b;
        bool need_flush = false;
        __le64 sb_seq_count;
        int r;

        wc_lock(wc);

        wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;

        if (WC_MODE_PMEM(wc)) {
                persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
        } else {
                r = writecache_read_metadata(wc, wc->metadata_sectors);
                if (r) {
                        size_t sb_entries_offset;
                        writecache_error(wc, r, "unable to read metadata: %d", r);
                        sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
                        memset((char *)wc->memory_map + sb_entries_offset, -1,
                               (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
                }
        }

        wc->tree = RB_ROOT;
        INIT_LIST_HEAD(&wc->lru);
        if (WC_MODE_SORT_FREELIST(wc)) {
                wc->freetree = RB_ROOT;
                wc->current_free = NULL;
        } else {
                INIT_LIST_HEAD(&wc->freelist);
        }
        wc->freelist_size = 0;

        r = copy_mc_to_kernel(&sb_seq_count, &sb(wc)->seq_count,
                              sizeof(uint64_t));
        if (r) {
                writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
                sb_seq_count = cpu_to_le64(0);
        }
        wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        for (b = 0; b < wc->n_blocks; b++) {
                struct wc_entry *e = &wc->entries[b];
                struct wc_memory_entry wme;
                if (writecache_has_error(wc)) {
                        e->original_sector = -1;
                        e->seq_count = -1;
                        continue;
                }
                r = copy_mc_to_kernel(&wme, memory_entry(wc, e),
                                      sizeof(struct wc_memory_entry));
                if (r) {
                        writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
                                         (unsigned long)b, r);
                        e->original_sector = -1;
                        e->seq_count = -1;
                } else {
                        e->original_sector = le64_to_cpu(wme.original_sector);
                        e->seq_count = le64_to_cpu(wme.seq_count);
                }
                cond_resched();
        }
#endif
        for (b = 0; b < wc->n_blocks; b++) {
                struct wc_entry *e = &wc->entries[b];
                if (!writecache_entry_is_committed(wc, e)) {
                        if (read_seq_count(wc, e) != -1) {
erase_this:
                                clear_seq_count(wc, e);
                                need_flush = true;
                        }
                        writecache_add_to_freelist(wc, e);
                } else {
                        struct wc_entry *old;

                        old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
                        if (!old) {
                                writecache_insert_entry(wc, e);
                        } else {
                                if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
                                        writecache_error(wc, -EINVAL,
                                                 "two identical entries, position %llu, sector %llu, sequence %llu",
                                                 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
                                                 (unsigned long long)read_seq_count(wc, e));
                                }
                                if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
                                        goto erase_this;
                                } else {
                                        writecache_free_entry(wc, old);
                                        writecache_insert_entry(wc, e);
                                        need_flush = true;
                                }
                        }
                }
                cond_resched();
        }

        if (need_flush) {
                writecache_flush_all_metadata(wc);
                writecache_commit_flushed(wc, false);
        }

        writecache_verify_watermark(wc);

        if (wc->max_age != MAX_AGE_UNSPECIFIED)
                mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);

        wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
        if (argc != 1)
                return -EINVAL;

        wc_lock(wc);
        if (dm_suspended(wc->ti)) {
                wc_unlock(wc);
                return -EBUSY;
        }
        if (writecache_has_error(wc)) {
                wc_unlock(wc);
                return -EIO;
        }

        writecache_flush(wc);
        wc->writeback_all++;
        queue_work(wc->writeback_wq, &wc->writeback_work);
        wc_unlock(wc);

        flush_workqueue(wc->writeback_wq);

        wc_lock(wc);
        wc->writeback_all--;
        if (writecache_has_error(wc)) {
                wc_unlock(wc);
                return -EIO;
        }
        wc_unlock(wc);

        return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
        if (argc != 1)
                return -EINVAL;

        wc_lock(wc);
        wc->flush_on_suspend = true;
        wc_unlock(wc);

        return 0;
}

static void activate_cleaner(struct dm_writecache *wc)
{
        wc->flush_on_suspend = true;
        wc->cleaner = true;
        wc->freelist_high_watermark = wc->n_blocks;
        wc->freelist_low_watermark = wc->n_blocks;
}

static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
        if (argc != 1)
                return -EINVAL;

        wc_lock(wc);
        activate_cleaner(wc);
        if (!dm_suspended(wc->ti))
                writecache_verify_watermark(wc);
        wc_unlock(wc);

        return 0;
}

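/*
 * Message interface.  The messages handled above are sent from userspace
 * with the dmsetup tool, e.g. "dmsetup message <device> 0 flush" to flush
 * the cache, or "dmsetup message <device> 0 cleaner" to start writing all
 * cached data back to the origin device.
 */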
static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
                              char *result, unsigned maxlen)
{
        int r = -EINVAL;
        struct dm_writecache *wc = ti->private;

        if (!strcasecmp(argv[0], "flush"))
                r = process_flush_mesg(argc, argv, wc);
        else if (!strcasecmp(argv[0], "flush_on_suspend"))
                r = process_flush_on_suspend_mesg(argc, argv, wc);
        else if (!strcasecmp(argv[0], "cleaner"))
                r = process_cleaner_mesg(argc, argv, wc);
        else
                DMERR("unrecognised message received: %s", argv[0]);

        return r;
}

static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
{
        /*
         * clflushopt performs better with block size 1024, 2048, 4096
         * non-temporal stores perform better with block size 512
         *
         * block size   512             1024            2048            4096
         * movnti       496 MB/s        642 MB/s        725 MB/s        744 MB/s
         * clflushopt   373 MB/s        688 MB/s        1.1 GB/s        1.2 GB/s
         *
         * We see that movnti performs better for 512-byte blocks, and
         * clflushopt performs better for 1024-byte and larger blocks. So, we
         * prefer clflushopt for sizes >= 768.
         *
         * NOTE: this happens to be the case now (with dm-writecache's single
         * threaded model) but re-evaluate this once memcpy_flushcache() is
         * enabled to use movdir64b which might invalidate this performance
         * advantage seen with cache-allocating-writes plus flushing.
         */
#ifdef CONFIG_X86
        if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
            likely(boot_cpu_data.x86_clflush_size == 64) &&
            likely(size >= 768)) {
                do {
                        memcpy((void *)dest, (void *)source, 64);
                        clflushopt((void *)dest);
                        dest += 64;
                        source += 64;
                        size -= 64;
                } while (size >= 64);
                return;
        }
#endif
        memcpy_flushcache(dest, source, size);
}

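/*
 * Copy one cache block between the bio's pages and persistent memory.
 * Reads use copy_mc_to_kernel() so that a hardware memory error is turned
 * into an I/O error on the bio rather than a machine crash.
 */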
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
        void *buf;
        unsigned long flags;
        unsigned size;
        int rw = bio_data_dir(bio);
        unsigned remaining_size = wc->block_size;

        do {
                struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
                buf = bvec_kmap_irq(&bv, &flags);
                size = bv.bv_len;
                if (unlikely(size > remaining_size))
                        size = remaining_size;

                if (rw == READ) {
                        int r;
                        r = copy_mc_to_kernel(buf, data, size);
                        flush_dcache_page(bio_page(bio));
                        if (unlikely(r)) {
                                writecache_error(wc, r, "hardware memory error when reading data: %d", r);
                                bio->bi_status = BLK_STS_IOERR;
                        }
                } else {
                        flush_dcache_page(bio_page(bio));
                        memcpy_flushcache_optimized(data, buf, size);
                }

                bvec_kunmap_irq(buf, &flags);

                data = (char *)data + size;
                remaining_size -= size;
                bio_advance(bio, size);
        } while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
        struct dm_writecache *wc = data;

        while (1) {
                struct bio *bio;

                wc_lock(wc);
                bio = bio_list_pop(&wc->flush_list);
                if (!bio) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        wc_unlock(wc);

                        if (unlikely(kthread_should_stop())) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }

                        schedule();
                        continue;
                }

                if (bio_op(bio) == REQ_OP_DISCARD) {
                        writecache_discard(wc, bio->bi_iter.bi_sector,
                                           bio_end_sector(bio));
                        wc_unlock(wc);
                        bio_set_dev(bio, wc->dev->bdev);
                        submit_bio_noacct(bio);
                } else {
                        writecache_flush(wc);
                        wc_unlock(wc);
                        if (writecache_has_error(wc))
                                bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                }
        }

        return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
        if (bio_list_empty(&wc->flush_list))
                wake_up_process(wc->flush_thread);
        bio_list_add(&wc->flush_list, bio);
}

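/*
 * The main map function.  Flushes and discards are handled inline in PMEM
 * mode and offloaded to the flush thread in SSD mode; reads are served
 * from the cache when the block is present and otherwise remapped to the
 * origin device; writes normally allocate cache blocks, falling back to
 * the origin device when the freelist is exhausted in SSD mode.
 */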
1288 static int writecache_map(struct dm_target *ti, struct bio *bio)
1289 {
1290         struct wc_entry *e;
1291         struct dm_writecache *wc = ti->private;
1292
1293         bio->bi_private = NULL;
1294
1295         wc_lock(wc);
1296
1297         if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1298                 if (writecache_has_error(wc))
1299                         goto unlock_error;
1300                 if (WC_MODE_PMEM(wc)) {
1301                         writecache_flush(wc);
1302                         if (writecache_has_error(wc))
1303                                 goto unlock_error;
1304                         if (unlikely(wc->cleaner))
1305                                 goto unlock_remap_origin;
1306                         goto unlock_submit;
1307                 } else {
1308                         if (dm_bio_get_target_bio_nr(bio))
1309                                 goto unlock_remap_origin;
1310                         writecache_offload_bio(wc, bio);
1311                         goto unlock_return;
1312                 }
1313         }
1314
1315         bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1316
1317         if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
1318                                 (wc->block_size / 512 - 1)) != 0)) {
1319                 DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
1320                       (unsigned long long)bio->bi_iter.bi_sector,
1321                       bio->bi_iter.bi_size, wc->block_size);
1322                 goto unlock_error;
1323         }
1324
1325         if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
1326                 if (writecache_has_error(wc))
1327                         goto unlock_error;
1328                 if (WC_MODE_PMEM(wc)) {
1329                         writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
1330                         goto unlock_remap_origin;
1331                 } else {
1332                         writecache_offload_bio(wc, bio);
1333                         goto unlock_return;
1334                 }
1335         }
1336
1337         if (bio_data_dir(bio) == READ) {
1338 read_next_block:
1339                 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1340                 if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
1341                         if (WC_MODE_PMEM(wc)) {
1342                                 bio_copy_block(wc, bio, memory_data(wc, e));
1343                                 if (bio->bi_iter.bi_size)
1344                                         goto read_next_block;
1345                                 goto unlock_submit;
1346                         } else {
1347                                 dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
1348                                 bio_set_dev(bio, wc->ssd_dev->bdev);
1349                                 bio->bi_iter.bi_sector = cache_sector(wc, e);
1350                                 if (!writecache_entry_is_committed(wc, e))
1351                                         writecache_wait_for_ios(wc, WRITE);
1352                                 goto unlock_remap;
1353                         }
1354                 } else {
1355                         if (e) {
1356                                 sector_t next_boundary =
1357                                         read_original_sector(wc, e) - bio->bi_iter.bi_sector;
1358                                 if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
1359                                         dm_accept_partial_bio(bio, next_boundary);
1360                                 }
1361                         }
1362                         goto unlock_remap_origin;
1363                 }
1364         } else {
1365                 do {
1366                         bool found_entry = false;
1367                         bool search_used = false;
1368                         if (writecache_has_error(wc))
1369                                 goto unlock_error;
1370                         e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
1371                         if (e) {
1372                                 if (!writecache_entry_is_committed(wc, e)) {
1373                                         search_used = true;
1374                                         goto bio_copy;
1375                                 }
1376                                 if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
1377                                         wc->overwrote_committed = true;
1378                                         search_used = true;
1379                                         goto bio_copy;
1380                                 }
1381                                 found_entry = true;
1382                         } else {
1383                                 if (unlikely(wc->cleaner))
1384                                         goto direct_write;
1385                         }
1386                         e = writecache_pop_from_freelist(wc, (sector_t)-1);
1387                         if (unlikely(!e)) {
1388                                 if (!WC_MODE_PMEM(wc) && !found_entry) {
1389 direct_write:
1390                                         e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1391                                         if (e) {
1392                                                 sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
1393                                                 BUG_ON(!next_boundary);
1394                                                 if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
1395                                                         dm_accept_partial_bio(bio, next_boundary);
1396                                                 }
1397                                         }
1398                                         goto unlock_remap_origin;
1399                                 }
1400                                 writecache_wait_on_freelist(wc);
1401                                 continue;
1402                         }
1403                         write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
1404                         writecache_insert_entry(wc, e);
1405                         wc->uncommitted_blocks++;
1406 bio_copy:
1407                         if (WC_MODE_PMEM(wc)) {
1408                                 bio_copy_block(wc, bio, memory_data(wc, e));
1409                         } else {
1410                                 unsigned bio_size = wc->block_size;
1411                                 sector_t start_cache_sec = cache_sector(wc, e);
1412                                 sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
1413
1414                                 while (bio_size < bio->bi_iter.bi_size) {
1415                                         if (!search_used) {
1416                                                 struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
1417                                                 if (!f)
1418                                                         break;
1419                                                 write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
1420                                                                                 (bio_size >> SECTOR_SHIFT), wc->seq_count);
1421                                                 writecache_insert_entry(wc, f);
1422                                                 wc->uncommitted_blocks++;
1423                                         } else {
1424                                                 struct wc_entry *f;
1425                                                 struct rb_node *next = rb_next(&e->rb_node);
1426                                                 if (!next)
1427                                                         break;
1428                                                 f = container_of(next, struct wc_entry, rb_node);
1429                                                 if (f != e + 1)
1430                                                         break;
1431                                                 if (read_original_sector(wc, f) !=
1432                                                     read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
1433                                                         break;
1434                                                 if (unlikely(f->write_in_progress))
1435                                                         break;
1436                                                 if (writecache_entry_is_committed(wc, f))
1437                                                         wc->overwrote_committed = true;
1438                                                 e = f;
1439                                         }
1440                                         bio_size += wc->block_size;
1441                                         current_cache_sec += wc->block_size >> SECTOR_SHIFT;
1442                                 }
1443
1444                                 bio_set_dev(bio, wc->ssd_dev->bdev);
1445                                 bio->bi_iter.bi_sector = start_cache_sec;
1446                                 dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
1447
1448                                 if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
1449                                         wc->uncommitted_blocks = 0;
1450                                         queue_work(wc->writeback_wq, &wc->flush_work);
1451                                 } else {
1452                                         writecache_schedule_autocommit(wc);
1453                                 }
1454                                 goto unlock_remap;
1455                         }
1456                 } while (bio->bi_iter.bi_size);
1457
1458                 if (unlikely(bio->bi_opf & REQ_FUA ||
1459                              wc->uncommitted_blocks >= wc->autocommit_blocks))
1460                         writecache_flush(wc);
1461                 else
1462                         writecache_schedule_autocommit(wc);
1463                 goto unlock_submit;
1464         }
1465
1466 unlock_remap_origin:
1467         bio_set_dev(bio, wc->dev->bdev);
1468         wc_unlock(wc);
1469         return DM_MAPIO_REMAPPED;
1470
1471 unlock_remap:
1472         /* make sure that writecache_end_io decrements bio_in_progress: */
1473         bio->bi_private = (void *)1;
1474         atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
1475         wc_unlock(wc);
1476         return DM_MAPIO_REMAPPED;
1477
1478 unlock_submit:
1479         wc_unlock(wc);
1480         bio_endio(bio);
1481         return DM_MAPIO_SUBMITTED;
1482
1483 unlock_return:
1484         wc_unlock(wc);
1485         return DM_MAPIO_SUBMITTED;
1486
1487 unlock_error:
1488         wc_unlock(wc);
1489         bio_io_error(bio);
1490         return DM_MAPIO_SUBMITTED;
1491 }
1492
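/*
 * Bios that were remapped to the cache device have bi_private set in
 * writecache_map(); for those, drop the corresponding bio_in_progress
 * counter and wake up anyone sleeping in writecache_wait_for_ios().
 */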
1493 static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
1494 {
1495         struct dm_writecache *wc = ti->private;
1496
1497         if (bio->bi_private != NULL) {
1498                 int dir = bio_data_dir(bio);
1499                 if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
1500                         if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
1501                                 wake_up(&wc->bio_in_progress_wait[dir]);
1502         }
1503         return 0;
1504 }
1505
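/* Only the origin device backs the target's visible range. */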
1506 static int writecache_iterate_devices(struct dm_target *ti,
1507                                       iterate_devices_callout_fn fn, void *data)
1508 {
1509         struct dm_writecache *wc = ti->private;
1510
1511         return fn(ti, wc->dev, 0, ti->len, data);
1512 }
1513
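/*
 * Raise the queue limits to at least the cache block size, so that the
 * block layer never advertises a granularity smaller than a cache block.
 */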
1514 static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
1515 {
1516         struct dm_writecache *wc = ti->private;
1517
1518         if (limits->logical_block_size < wc->block_size)
1519                 limits->logical_block_size = wc->block_size;
1520
1521         if (limits->physical_block_size < wc->block_size)
1522                 limits->physical_block_size = wc->block_size;
1523
1524         if (limits->io_min < wc->block_size)
1525                 limits->io_min = wc->block_size;
1526 }
1527
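/*
 * Completion of a writeback bio (pmem mode). This may run in interrupt
 * context, so the heavy lifting is deferred to the endio thread via
 * wc->endio_list.
 */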
1529 static void writecache_writeback_endio(struct bio *bio)
1530 {
1531         struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
1532         struct dm_writecache *wc = wb->wc;
1533         unsigned long flags;
1534
1535         raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
1536         if (unlikely(list_empty(&wc->endio_list)))
1537                 wake_up_process(wc->endio_thread);
1538         list_add_tail(&wb->endio_entry, &wc->endio_list);
1539         raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
1540 }
1541
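/*
 * Completion of a dm-kcopyd copy (ssd mode): record any error and hand
 * the request over to the endio thread.
 */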
1542 static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
1543 {
1544         struct copy_struct *c = ptr;
1545         struct dm_writecache *wc = c->wc;
1546
1547         c->error = likely(!(read_err | write_err)) ? 0 : -EIO;
1548
1549         raw_spin_lock_irq(&wc->endio_list_lock);
1550         if (unlikely(list_empty(&wc->endio_list)))
1551                 wake_up_process(wc->endio_thread);
1552         list_add_tail(&c->endio_entry, &wc->endio_list);
1553         raw_spin_unlock_irq(&wc->endio_list_lock);
1554 }
1555
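/*
 * Process completed writeback bios (pmem mode): clear write_in_progress
 * and free the entries. Every ENDIO_LATENCY entries the metadata is
 * committed and wc->lock is briefly dropped to bound lock hold time.
 */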
1556 static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
1557 {
1558         unsigned i;
1559         struct writeback_struct *wb;
1560         struct wc_entry *e;
1561         unsigned long n_walked = 0;
1562
1563         do {
1564                 wb = list_entry(list->next, struct writeback_struct, endio_entry);
1565                 list_del(&wb->endio_entry);
1566
1567                 if (unlikely(wb->bio.bi_status != BLK_STS_OK))
1568                         writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
1569                                         "write error %d", wb->bio.bi_status);
1570                 i = 0;
1571                 do {
1572                         e = wb->wc_list[i];
1573                         BUG_ON(!e->write_in_progress);
1574                         e->write_in_progress = false;
1575                         INIT_LIST_HEAD(&e->lru);
1576                         if (!writecache_has_error(wc))
1577                                 writecache_free_entry(wc, e);
1578                         BUG_ON(!wc->writeback_size);
1579                         wc->writeback_size--;
1580                         n_walked++;
1581                         if (unlikely(n_walked >= ENDIO_LATENCY)) {
1582                                 writecache_commit_flushed(wc, false);
1583                                 wc_unlock(wc);
1584                                 wc_lock(wc);
1585                                 n_walked = 0;
1586                         }
1587                 } while (++i < wb->wc_list_n);
1588
1589                 if (wb->wc_list != wb->wc_list_inline)
1590                         kfree(wb->wc_list);
1591                 bio_put(&wb->bio);
1592         } while (!list_empty(list));
1593 }
1594
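/*
 * Process completed kcopyd copies (ssd mode): free all entries covered
 * by each copy_struct and return the copy_struct to the mempool.
 */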
1595 static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
1596 {
1597         struct copy_struct *c;
1598         struct wc_entry *e;
1599
1600         do {
1601                 c = list_entry(list->next, struct copy_struct, endio_entry);
1602                 list_del(&c->endio_entry);
1603
1604                 if (unlikely(c->error))
1605                         writecache_error(wc, c->error, "copy error");
1606
1607                 e = c->e;
1608                 do {
1609                         BUG_ON(!e->write_in_progress);
1610                         e->write_in_progress = false;
1611                         INIT_LIST_HEAD(&e->lru);
1612                         if (!writecache_has_error(wc))
1613                                 writecache_free_entry(wc, e);
1614
1615                         BUG_ON(!wc->writeback_size);
1616                         wc->writeback_size--;
1617                         e++;
1618                 } while (--c->n_entries);
1619                 mempool_free(c, &wc->copy_pool);
1620         } while (!list_empty(list));
1621 }
1622
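/*
 * Kernel thread that drains wc->endio_list. It takes a private copy of
 * the list, flushes the origin device (unless FUA writes already made
 * the data durable) and then frees the written-back entries.
 */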
1623 static int writecache_endio_thread(void *data)
1624 {
1625         struct dm_writecache *wc = data;
1626
1627         while (1) {
1628                 struct list_head list;
1629
1630                 raw_spin_lock_irq(&wc->endio_list_lock);
1631                 if (!list_empty(&wc->endio_list))
1632                         goto pop_from_list;
1633                 set_current_state(TASK_INTERRUPTIBLE);
1634                 raw_spin_unlock_irq(&wc->endio_list_lock);
1635
1636                 if (unlikely(kthread_should_stop())) {
1637                         set_current_state(TASK_RUNNING);
1638                         break;
1639                 }
1640
1641                 schedule();
1642
1643                 continue;
1644
1645 pop_from_list:
1646                 list = wc->endio_list;
1647                 list.next->prev = list.prev->next = &list;
1648                 INIT_LIST_HEAD(&wc->endio_list);
1649                 raw_spin_unlock_irq(&wc->endio_list_lock);
1650
1651                 if (!WC_MODE_FUA(wc))
1652                         writecache_disk_flush(wc, wc->dev);
1653
1654                 wc_lock(wc);
1655
1656                 if (WC_MODE_PMEM(wc)) {
1657                         __writecache_endio_pmem(wc, &list);
1658                 } else {
1659                         __writecache_endio_ssd(wc, &list);
1660                         writecache_wait_for_ios(wc, READ);
1661                 }
1662
1663                 writecache_commit_flushed(wc, false);
1664
1665                 wc_unlock(wc);
1666         }
1667
1668         return 0;
1669 }
1670
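/*
 * Flush the entry's data from the CPU cache and append its page to the
 * writeback bio. If the bio already reaches the end of the origin
 * device, the block is not added but success is returned.
 */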
1671 static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
1672 {
1673         struct dm_writecache *wc = wb->wc;
1674         unsigned block_size = wc->block_size;
1675         void *address = memory_data(wc, e);
1676
1677         persistent_memory_flush_cache(address, block_size);
1678
1679         if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
1680                 return true;
1681
1682         return bio_add_page(&wb->bio, persistent_memory_page(address),
1683                             block_size, persistent_memory_page_offset(address)) != 0;
1684 }
1685
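/*
 * Entries collected for writeback under wc->lock; they are submitted
 * after the lock has been dropped.
 */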
1686 struct writeback_list {
1687         struct list_head list;
1688         size_t size;
1689 };
1690
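/*
 * Throttle writeback so that no more than max_writeback_jobs entries
 * are in flight at once (0 means unlimited).
 */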
1691 static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
1692 {
1693         if (unlikely(wc->max_writeback_jobs)) {
1694                 if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
1695                         wc_lock(wc);
1696                         while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
1697                                 writecache_wait_on_freelist(wc);
1698                         wc_unlock(wc);
1699                 }
1700         }
1701         cond_resched();
1702 }
1703
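/*
 * Writeback for pmem mode: take entries off the local list, coalesce
 * runs that are contiguous on the origin device into a single bio and
 * submit it, using FUA writes when configured.
 */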
1704 static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
1705 {
1706         struct wc_entry *e, *f;
1707         struct bio *bio;
1708         struct writeback_struct *wb;
1709         unsigned max_pages;
1710
1711         while (wbl->size) {
1712                 wbl->size--;
1713                 e = container_of(wbl->list.prev, struct wc_entry, lru);
1714                 list_del(&e->lru);
1715
1716                 max_pages = e->wc_list_contiguous;
1717
1718                 bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
1719                 wb = container_of(bio, struct writeback_struct, bio);
1720                 wb->wc = wc;
1721                 bio->bi_end_io = writecache_writeback_endio;
1722                 bio_set_dev(bio, wc->dev->bdev);
1723                 bio->bi_iter.bi_sector = read_original_sector(wc, e);
1724                 if (max_pages <= WB_LIST_INLINE ||
1725                     unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
1726                                                            GFP_NOIO | __GFP_NORETRY |
1727                                                            __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
1728                         wb->wc_list = wb->wc_list_inline;
1729                         max_pages = WB_LIST_INLINE;
1730                 }
1731
1732                 BUG_ON(!wc_add_block(wb, e, GFP_NOIO));
1733
1734                 wb->wc_list[0] = e;
1735                 wb->wc_list_n = 1;
1736
1737                 while (wbl->size && wb->wc_list_n < max_pages) {
1738                         f = container_of(wbl->list.prev, struct wc_entry, lru);
1739                         if (read_original_sector(wc, f) !=
1740                             read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
1741                                 break;
1742                         if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
1743                                 break;
1744                         wbl->size--;
1745                         list_del(&f->lru);
1746                         wb->wc_list[wb->wc_list_n++] = f;
1747                         e = f;
1748                 }
1749                 bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
1750                 if (writecache_has_error(wc)) {
1751                         bio->bi_status = BLK_STS_IOERR;
1752                         bio_endio(bio);
1753                 } else if (unlikely(!bio_sectors(bio))) {
1754                         bio->bi_status = BLK_STS_OK;
1755                         bio_endio(bio);
1756                 } else {
1757                         submit_bio(bio);
1758                 }
1759
1760                 __writeback_throttle(wc, wbl);
1761         }
1762 }
1763
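/*
 * Writeback for ssd mode: hand each contiguous run of blocks to
 * dm-kcopyd to copy from the cache device back to the origin, trimming
 * runs that would extend past the end of the origin device.
 */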
1764 static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
1765 {
1766         struct wc_entry *e, *f;
1767         struct dm_io_region from, to;
1768         struct copy_struct *c;
1769
1770         while (wbl->size) {
1771                 unsigned n_sectors;
1772
1773                 wbl->size--;
1774                 e = container_of(wbl->list.prev, struct wc_entry, lru);
1775                 list_del(&e->lru);
1776
1777                 n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);
1778
1779                 from.bdev = wc->ssd_dev->bdev;
1780                 from.sector = cache_sector(wc, e);
1781                 from.count = n_sectors;
1782                 to.bdev = wc->dev->bdev;
1783                 to.sector = read_original_sector(wc, e);
1784                 to.count = n_sectors;
1785
1786                 c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
1787                 c->wc = wc;
1788                 c->e = e;
1789                 c->n_entries = e->wc_list_contiguous;
1790
1791                 while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
1792                         wbl->size--;
1793                         f = container_of(wbl->list.prev, struct wc_entry, lru);
1794                         BUG_ON(f != e + 1);
1795                         list_del(&f->lru);
1796                         e = f;
1797                 }
1798
1799                 if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
1800                         if (to.sector >= wc->data_device_sectors) {
1801                                 writecache_copy_endio(0, 0, c);
1802                                 continue;
1803                         }
1804                         from.count = to.count = wc->data_device_sectors - to.sector;
1805                 }
1806
1807                 dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
1808
1809                 __writeback_throttle(wc, wbl);
1810         }
1811 }
1812
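/*
 * The writeback work function: scan the LRU (or the whole tree when
 * flushing everything), group entries that are contiguous on the origin
 * device, and pass them to the pmem or ssd writeback routine. Entries
 * shadowed by an older in-flight copy of the same sector are skipped
 * and retried later.
 */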
1813 static void writecache_writeback(struct work_struct *work)
1814 {
1815         struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
1816         struct blk_plug plug;
1817         struct wc_entry *f, *g, *e = NULL;
1818         struct rb_node *node, *next_node;
1819         struct list_head skipped;
1820         struct writeback_list wbl;
1821         unsigned long n_walked;
1822
1823         wc_lock(wc);
1824 restart:
1825         if (writecache_has_error(wc)) {
1826                 wc_unlock(wc);
1827                 return;
1828         }
1829
1830         if (unlikely(wc->writeback_all)) {
1831                 if (writecache_wait_for_writeback(wc))
1832                         goto restart;
1833         }
1834
1835         if (wc->overwrote_committed) {
1836                 writecache_wait_for_ios(wc, WRITE);
1837         }
1838
1839         n_walked = 0;
1840         INIT_LIST_HEAD(&skipped);
1841         INIT_LIST_HEAD(&wbl.list);
1842         wbl.size = 0;
1843         while (!list_empty(&wc->lru) &&
1844                (wc->writeback_all ||
1845                 wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
1846                 (jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
1847                  wc->max_age - wc->max_age / MAX_AGE_DIV))) {
1848
1849                 n_walked++;
1850                 if (unlikely(n_walked > WRITEBACK_LATENCY) &&
1851                     likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
1852                         queue_work(wc->writeback_wq, &wc->writeback_work);
1853                         break;
1854                 }
1855
1856                 if (unlikely(wc->writeback_all)) {
1857                         if (unlikely(!e)) {
1858                                 writecache_flush(wc);
1859                                 e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
1860                         } else
1861                                 e = g;
1862                 } else
1863                         e = container_of(wc->lru.prev, struct wc_entry, lru);
1864                 BUG_ON(e->write_in_progress);
1865                 if (unlikely(!writecache_entry_is_committed(wc, e))) {
1866                         writecache_flush(wc);
1867                 }
1868                 node = rb_prev(&e->rb_node);
1869                 if (node) {
1870                         f = container_of(node, struct wc_entry, rb_node);
1871                         if (unlikely(read_original_sector(wc, f) ==
1872                                      read_original_sector(wc, e))) {
1873                                 BUG_ON(!f->write_in_progress);
1874                                 list_del(&e->lru);
1875                                 list_add(&e->lru, &skipped);
1876                                 cond_resched();
1877                                 continue;
1878                         }
1879                 }
1880                 wc->writeback_size++;
1881                 list_del(&e->lru);
1882                 list_add(&e->lru, &wbl.list);
1883                 wbl.size++;
1884                 e->write_in_progress = true;
1885                 e->wc_list_contiguous = 1;
1886
1887                 f = e;
1888
1889                 while (1) {
1890                         next_node = rb_next(&f->rb_node);
1891                         if (unlikely(!next_node))
1892                                 break;
1893                         g = container_of(next_node, struct wc_entry, rb_node);
1894                         if (unlikely(read_original_sector(wc, g) ==
1895                             read_original_sector(wc, f))) {
1896                                 f = g;
1897                                 continue;
1898                         }
1899                         if (read_original_sector(wc, g) !=
1900                             read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
1901                                 break;
1902                         if (unlikely(g->write_in_progress))
1903                                 break;
1904                         if (unlikely(!writecache_entry_is_committed(wc, g)))
1905                                 break;
1906
1907                         if (!WC_MODE_PMEM(wc)) {
1908                                 if (g != f + 1)
1909                                         break;
1910                         }
1911
1912                         n_walked++;
1913                         //if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
1914                         //      break;
1915
1916                         wc->writeback_size++;
1917                         list_del(&g->lru);
1918                         list_add(&g->lru, &wbl.list);
1919                         wbl.size++;
1920                         g->write_in_progress = true;
1921                         g->wc_list_contiguous = BIO_MAX_PAGES;
1922                         f = g;
1923                         e->wc_list_contiguous++;
1924                         if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
1925                                 if (unlikely(wc->writeback_all)) {
1926                                         next_node = rb_next(&f->rb_node);
1927                                         if (likely(next_node))
1928                                                 g = container_of(next_node, struct wc_entry, rb_node);
1929                                 }
1930                                 break;
1931                         }
1932                 }
1933                 cond_resched();
1934         }
1935
1936         if (!list_empty(&skipped)) {
1937                 list_splice_tail(&skipped, &wc->lru);
1938                 /*
1939                  * If we didn't make any progress, we must wait until some
1940                  * writeback finishes to avoid burning CPU in a loop
1941                  */
1942                 if (unlikely(!wbl.size))
1943                         writecache_wait_for_writeback(wc);
1944         }
1945
1946         wc_unlock(wc);
1947
1948         blk_start_plug(&plug);
1949
1950         if (WC_MODE_PMEM(wc))
1951                 __writecache_writeback_pmem(wc, &wbl);
1952         else
1953                 __writecache_writeback_ssd(wc, &wbl);
1954
1955         blk_finish_plug(&plug);
1956
1957         if (unlikely(wc->writeback_all)) {
1958                 wc_lock(wc);
1959                 while (writecache_wait_for_writeback(wc));
1960                 wc_unlock(wc);
1961         }
1962 }
1963
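/*
 * Split the cache device between metadata (superblock plus one
 * wc_memory_entry per block) and data blocks. Each cached block costs
 * block_size + sizeof(struct wc_memory_entry) bytes, so e.g. a 1 GiB
 * device with 4096-byte blocks yields roughly 261000 blocks before the
 * metadata area is rounded up to a block boundary.
 */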
1964 static int calculate_memory_size(uint64_t device_size, unsigned block_size,
1965                                  size_t *n_blocks_p, size_t *n_metadata_blocks_p)
1966 {
1967         uint64_t n_blocks, offset;
1968         struct wc_entry e;
1969
1970         n_blocks = device_size;
1971         do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));
1972
1973         while (1) {
1974                 if (!n_blocks)
1975                         return -ENOSPC;
1976                 /* Verify that the offset of entries[n_blocks] won't overflow a size_t */
1977                 if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
1978                                  sizeof(struct wc_memory_entry)))
1979                         return -EFBIG;
1980                 offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
1981                 offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
1982                 if (offset + n_blocks * block_size <= device_size)
1983                         break;
1984                 n_blocks--;
1985         }
1986
1987         /* check that n_blocks fits in the wc_entry::index bit field */
1988         e.index = n_blocks;
1989         if (e.index != n_blocks)
1990                 return -EFBIG;
1991
1992         if (n_blocks_p)
1993                 *n_blocks_p = n_blocks;
1994         if (n_metadata_blocks_p)
1995                 *n_metadata_blocks_p = offset >> __ffs(block_size);
1996         return 0;
1997 }
1998
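/*
 * Format a fresh cache: zero the superblock fields, invalidate every
 * entry, flush everything, and only then write the magic number, so
 * that a crash during initialization leaves the device unrecognizable
 * rather than half-initialized.
 */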
1999 static int init_memory(struct dm_writecache *wc)
2000 {
2001         size_t b;
2002         int r;
2003
2004         r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
2005         if (r)
2006                 return r;
2007
2008         r = writecache_alloc_entries(wc);
2009         if (r)
2010                 return r;
2011
2012         for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
2013                 pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
2014         pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
2015         pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
2016         pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
2017         pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
2018
2019         for (b = 0; b < wc->n_blocks; b++) {
2020                 write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
2021                 cond_resched();
2022         }
2023
2024         writecache_flush_all_metadata(wc);
2025         writecache_commit_flushed(wc, false);
2026         pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
2027         writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
2028         writecache_commit_flushed(wc, false);
2029
2030         return 0;
2031 }
2032
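/*
 * Destructor; also used by the constructor's error paths, so every
 * resource is checked before being released.
 */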
2033 static void writecache_dtr(struct dm_target *ti)
2034 {
2035         struct dm_writecache *wc = ti->private;
2036
2037         if (!wc)
2038                 return;
2039
2040         if (wc->endio_thread)
2041                 kthread_stop(wc->endio_thread);
2042
2043         if (wc->flush_thread)
2044                 kthread_stop(wc->flush_thread);
2045
2046         bioset_exit(&wc->bio_set);
2047
2048         mempool_exit(&wc->copy_pool);
2049
2050         if (wc->writeback_wq)
2051                 destroy_workqueue(wc->writeback_wq);
2052
2053         if (wc->dev)
2054                 dm_put_device(ti, wc->dev);
2055
2056         if (wc->ssd_dev)
2057                 dm_put_device(ti, wc->ssd_dev);
2058
2059         if (wc->entries)
2060                 vfree(wc->entries);
2061
2062         if (wc->memory_map) {
2063                 if (WC_MODE_PMEM(wc))
2064                         persistent_memory_release(wc);
2065                 else
2066                         vfree(wc->memory_map);
2067         }
2068
2069         if (wc->dm_kcopyd)
2070                 dm_kcopyd_client_destroy(wc->dm_kcopyd);
2071
2072         if (wc->dm_io)
2073                 dm_io_client_destroy(wc->dm_io);
2074
2075         if (wc->dirty_bitmap)
2076                 vfree(wc->dirty_bitmap);
2077
2078         kfree(wc);
2079 }
2080
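/*
 * Construct a writecache target:
 *	writecache <p|s> <origin dev> <cache dev> <block size>
 *		   <#optional args> [<optional arg>...]
 *
 * A minimal sketch of table creation (hypothetical device paths):
 *	dmsetup create wc --table "0 $(blockdev --getsz /dev/origin) \
 *		writecache s /dev/origin /dev/cache 4096 0"
 */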
2081 static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2082 {
2083         struct dm_writecache *wc;
2084         struct dm_arg_set as;
2085         const char *string;
2086         unsigned opt_params;
2087         size_t offset, data_size;
2088         int i, r;
2089         char dummy;
2090         int high_wm_percent = HIGH_WATERMARK;
2091         int low_wm_percent = LOW_WATERMARK;
2092         uint64_t x;
2093         struct wc_memory_superblock s;
2094
2095         static struct dm_arg _args[] = {
2096                 {0, 16, "Invalid number of feature args"},
2097         };
2098
2099         as.argc = argc;
2100         as.argv = argv;
2101
2102         wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
2103         if (!wc) {
2104                 ti->error = "Cannot allocate writecache structure";
2105                 r = -ENOMEM;
2106                 goto bad;
2107         }
2108         ti->private = wc;
2109         wc->ti = ti;
2110
2111         mutex_init(&wc->lock);
2112         wc->max_age = MAX_AGE_UNSPECIFIED;
2113         writecache_poison_lists(wc);
2114         init_waitqueue_head(&wc->freelist_wait);
2115         timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
2116         timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);
2117
2118         for (i = 0; i < 2; i++) {
2119                 atomic_set(&wc->bio_in_progress[i], 0);
2120                 init_waitqueue_head(&wc->bio_in_progress_wait[i]);
2121         }
2122
2123         wc->dm_io = dm_io_client_create();
2124         if (IS_ERR(wc->dm_io)) {
2125                 r = PTR_ERR(wc->dm_io);
2126                 ti->error = "Unable to allocate dm-io client";
2127                 wc->dm_io = NULL;
2128                 goto bad;
2129         }
2130
2131         wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
2132         if (!wc->writeback_wq) {
2133                 r = -ENOMEM;
2134                 ti->error = "Could not allocate writeback workqueue";
2135                 goto bad;
2136         }
2137         INIT_WORK(&wc->writeback_work, writecache_writeback);
2138         INIT_WORK(&wc->flush_work, writecache_flush_work);
2139
2140         raw_spin_lock_init(&wc->endio_list_lock);
2141         INIT_LIST_HEAD(&wc->endio_list);
2142         wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
2143         if (IS_ERR(wc->endio_thread)) {
2144                 r = PTR_ERR(wc->endio_thread);
2145                 wc->endio_thread = NULL;
2146                 ti->error = "Couldn't spawn endio thread";
2147                 goto bad;
2148         }
2149         wake_up_process(wc->endio_thread);
2150
2151         /*
2152          * Parse the mode (pmem or ssd)
2153          */
2154         string = dm_shift_arg(&as);
2155         if (!string)
2156                 goto bad_arguments;
2157
2158         if (!strcasecmp(string, "s")) {
2159                 wc->pmem_mode = false;
2160         } else if (!strcasecmp(string, "p")) {
2161 #ifdef DM_WRITECACHE_HAS_PMEM
2162                 wc->pmem_mode = true;
2163                 wc->writeback_fua = true;
2164 #else
2165                 /*
2166                  * If the architecture doesn't support persistent memory or
2167                  * the kernel doesn't support any DAX drivers, this driver can
2168                  * only be used in SSD-only mode.
2169                  */
2170                 r = -EOPNOTSUPP;
2171                 ti->error = "Persistent memory or DAX not supported on this system";
2172                 goto bad;
2173 #endif
2174         } else {
2175                 goto bad_arguments;
2176         }
2177
2178         if (WC_MODE_PMEM(wc)) {
2179                 r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
2180                                 offsetof(struct writeback_struct, bio),
2181                                 BIOSET_NEED_BVECS);
2182                 if (r) {
2183                         ti->error = "Could not allocate bio set";
2184                         goto bad;
2185                 }
2186         } else {
2187                 r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
2188                 if (r) {
2189                         ti->error = "Could not allocate mempool";
2190                         goto bad;
2191                 }
2192         }
2193
2194         /*
2195          * Parse the origin data device
2196          */
2197         string = dm_shift_arg(&as);
2198         if (!string)
2199                 goto bad_arguments;
2200         r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
2201         if (r) {
2202                 ti->error = "Origin data device lookup failed";
2203                 goto bad;
2204         }
2205
2206         /*
2207          * Parse the cache data device (pmem or ssd)
2208          */
2209         string = dm_shift_arg(&as);
2210         if (!string)
2211                 goto bad_arguments;
2212
2213         r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
2214         if (r) {
2215                 ti->error = "Cache data device lookup failed";
2216                 goto bad;
2217         }
2218         wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
2219
2220         /*
2221          * Parse the cache block size
2222          */
2223         string = dm_shift_arg(&as);
2224         if (!string)
2225                 goto bad_arguments;
2226         if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
2227             wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
2228             (wc->block_size & (wc->block_size - 1))) {
2229                 r = -EINVAL;
2230                 ti->error = "Invalid block size";
2231                 goto bad;
2232         }
2233         if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
2234             wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
2235                 r = -EINVAL;
2236                 ti->error = "Block size is smaller than device logical block size";
2237                 goto bad;
2238         }
2239         wc->block_size_bits = __ffs(wc->block_size);
2240
2241         wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
2242         wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
2243         wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);
2244
2245         /*
2246          * Parse optional arguments
2247          */
2248         r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
2249         if (r)
2250                 goto bad;
2251
2252         while (opt_params) {
2253                 string = dm_shift_arg(&as), opt_params--;
2254                 if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
2255                         unsigned long long start_sector;
2256                         string = dm_shift_arg(&as), opt_params--;
2257                         if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
2258                                 goto invalid_optional;
2259                         wc->start_sector = start_sector;
2260                         wc->start_sector_set = true;
2261                         if (wc->start_sector != start_sector ||
2262                             wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
2263                                 goto invalid_optional;
2264                 } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
2265                         string = dm_shift_arg(&as), opt_params--;
2266                         if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
2267                                 goto invalid_optional;
2268                         if (high_wm_percent < 0 || high_wm_percent > 100)
2269                                 goto invalid_optional;
2270                         wc->high_wm_percent_value = high_wm_percent;
2271                         wc->high_wm_percent_set = true;
2272                 } else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
2273                         string = dm_shift_arg(&as), opt_params--;
2274                         if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
2275                                 goto invalid_optional;
2276                         if (low_wm_percent < 0 || low_wm_percent > 100)
2277                                 goto invalid_optional;
2278                         wc->low_wm_percent_value = low_wm_percent;
2279                         wc->low_wm_percent_set = true;
2280                 } else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
2281                         string = dm_shift_arg(&as), opt_params--;
2282                         if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
2283                                 goto invalid_optional;
2284                         wc->max_writeback_jobs_set = true;
2285                 } else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
2286                         string = dm_shift_arg(&as), opt_params--;
2287                         if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
2288                                 goto invalid_optional;
2289                         wc->autocommit_blocks_set = true;
2290                 } else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
2291                         unsigned autocommit_msecs;
2292                         string = dm_shift_arg(&as), opt_params--;
2293                         if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
2294                                 goto invalid_optional;
2295                         if (autocommit_msecs > 3600000)
2296                                 goto invalid_optional;
2297                         wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
2298                         wc->autocommit_time_value = autocommit_msecs;
2299                         wc->autocommit_time_set = true;
2300                 } else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
2301                         unsigned max_age_msecs;
2302                         string = dm_shift_arg(&as), opt_params--;
2303                         if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
2304                                 goto invalid_optional;
2305                         if (max_age_msecs > 86400000)
2306                                 goto invalid_optional;
2307                         wc->max_age = msecs_to_jiffies(max_age_msecs);
2308                         wc->max_age_set = true;
2309                         wc->max_age_value = max_age_msecs;
2310                 } else if (!strcasecmp(string, "cleaner")) {
2311                         wc->cleaner_set = true;
2312                         wc->cleaner = true;
2313                 } else if (!strcasecmp(string, "fua")) {
2314                         if (WC_MODE_PMEM(wc)) {
2315                                 wc->writeback_fua = true;
2316                                 wc->writeback_fua_set = true;
2317                         } else goto invalid_optional;
2318                 } else if (!strcasecmp(string, "nofua")) {
2319                         if (WC_MODE_PMEM(wc)) {
2320                                 wc->writeback_fua = false;
2321                                 wc->writeback_fua_set = true;
2322                         } else goto invalid_optional;
2323                 } else {
2324 invalid_optional:
2325                         r = -EINVAL;
2326                         ti->error = "Invalid optional argument";
2327                         goto bad;
2328                 }
2329         }
2330
2331         if (high_wm_percent < low_wm_percent) {
2332                 r = -EINVAL;
2333                 ti->error = "High watermark must be greater than or equal to low watermark";
2334                 goto bad;
2335         }
2336
2337         if (WC_MODE_PMEM(wc)) {
2338                 if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
2339                         r = -EOPNOTSUPP;
2340                         ti->error = "Asynchronous persistent memory not supported as pmem cache";
2341                         goto bad;
2342                 }
2343
2344                 r = persistent_memory_claim(wc);
2345                 if (r) {
2346                         ti->error = "Unable to map persistent memory for cache";
2347                         goto bad;
2348                 }
2349         } else {
2350                 size_t n_blocks, n_metadata_blocks;
2351                 uint64_t n_bitmap_bits;
2352
2353                 wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
2354
2355                 bio_list_init(&wc->flush_list);
2356                 wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
2357                 if (IS_ERR(wc->flush_thread)) {
2358                         r = PTR_ERR(wc->flush_thread);
2359                         wc->flush_thread = NULL;
2360                         ti->error = "Couldn't spawn flush thread";
2361                         goto bad;
2362                 }
2363                 wake_up_process(wc->flush_thread);
2364
2365                 r = calculate_memory_size(wc->memory_map_size, wc->block_size,
2366                                           &n_blocks, &n_metadata_blocks);
2367                 if (r) {
2368                         ti->error = "Invalid device size";
2369                         goto bad;
2370                 }
2371
2372                 n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
2373                                  BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
2374                 /* this is a limitation of the test_bit functions */
2375                 if (n_bitmap_bits > 1U << 31) {
2376                         r = -EFBIG;
2377                         ti->error = "Invalid device size";
2378                         goto bad;
2379                 }
2380
2381                 wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
2382                 if (!wc->memory_map) {
2383                         r = -ENOMEM;
2384                         ti->error = "Unable to allocate memory for metadata";
2385                         goto bad;
2386                 }
2387
2388                 wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2389                 if (IS_ERR(wc->dm_kcopyd)) {
2390                         r = PTR_ERR(wc->dm_kcopyd);
2391                         ti->error = "Unable to allocate dm-kcopyd client";
2392                         wc->dm_kcopyd = NULL;
2393                         goto bad;
2394                 }
2395
2396                 wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
2397                 wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
2398                         BITS_PER_LONG * sizeof(unsigned long);
2399                 wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
2400                 if (!wc->dirty_bitmap) {
2401                         r = -ENOMEM;
2402                         ti->error = "Unable to allocate dirty bitmap";
2403                         goto bad;
2404                 }
2405
2406                 r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
2407                 if (r) {
2408                         ti->error = "Unable to read first block of metadata";
2409                         goto bad;
2410                 }
2411         }
2412
2413         r = copy_mc_to_kernel(&s, sb(wc), sizeof(struct wc_memory_superblock));
2414         if (r) {
2415                 ti->error = "Hardware memory error when reading superblock";
2416                 goto bad;
2417         }
2418         if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
2419                 r = init_memory(wc);
2420                 if (r) {
2421                         ti->error = "Unable to initialize device";
2422                         goto bad;
2423                 }
2424                 r = copy_mc_to_kernel(&s, sb(wc),
2425                                       sizeof(struct wc_memory_superblock));
2426                 if (r) {
2427                         ti->error = "Hardware memory error when reading superblock";
2428                         goto bad;
2429                 }
2430         }
2431
2432         if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
2433                 ti->error = "Invalid magic in the superblock";
2434                 r = -EINVAL;
2435                 goto bad;
2436         }
2437
2438         if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
2439                 ti->error = "Invalid version in the superblock";
2440                 r = -EINVAL;
2441                 goto bad;
2442         }
2443
2444         if (le32_to_cpu(s.block_size) != wc->block_size) {
2445                 ti->error = "Block size does not match superblock";
2446                 r = -EINVAL;
2447                 goto bad;
2448         }
2449
2450         wc->n_blocks = le64_to_cpu(s.n_blocks);
2451
2452         offset = wc->n_blocks * sizeof(struct wc_memory_entry);
2453         if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
2454 overflow:
2455                 ti->error = "Overflow in size calculation";
2456                 r = -EINVAL;
2457                 goto bad;
2458         }
2459         offset += sizeof(struct wc_memory_superblock);
2460         if (offset < sizeof(struct wc_memory_superblock))
2461                 goto overflow;
2462         offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
2463         data_size = wc->n_blocks * (size_t)wc->block_size;
2464         if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
2465             (offset + data_size < offset))
2466                 goto overflow;
2467         if (offset + data_size > wc->memory_map_size) {
2468                 ti->error = "Memory area is too small";
2469                 r = -EINVAL;
2470                 goto bad;
2471         }
2472
2473         wc->metadata_sectors = offset >> SECTOR_SHIFT;
2474         wc->block_start = (char *)sb(wc) + offset;
2475
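	/*
	 * Convert the watermark percentages into absolute freelist sizes,
	 * rounding to nearest: e.g. n_blocks = 1000 and high_watermark 50
	 * give freelist_high_watermark = 1000 * (100 - 50) / 100 = 500.
	 */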
2476         x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
2477         x += 50;
2478         do_div(x, 100);
2479         wc->freelist_high_watermark = x;
2480         x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
2481         x += 50;
2482         do_div(x, 100);
2483         wc->freelist_low_watermark = x;
2484
2485         if (wc->cleaner)
2486                 activate_cleaner(wc);
2487
2488         r = writecache_alloc_entries(wc);
2489         if (r) {
2490                 ti->error = "Cannot allocate memory";
2491                 goto bad;
2492         }
2493
2494         ti->num_flush_bios = WC_MODE_PMEM(wc) ? 1 : 2;
2495         ti->flush_supported = true;
2496         ti->num_discard_bios = 1;
2497
2498         if (WC_MODE_PMEM(wc))
2499                 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
2500
2501         return 0;
2502
2503 bad_arguments:
2504         r = -EINVAL;
2505         ti->error = "Bad arguments";
2506 bad:
2507         writecache_dtr(ti);
2508         return r;
2509 }
2510
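/*
 * Report status: the INFO line gives the error state and the total,
 * free and under-writeback block counts; the TABLE line reproduces the
 * constructor arguments, including any optional arguments that were
 * explicitly set.
 */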
2511 static void writecache_status(struct dm_target *ti, status_type_t type,
2512                               unsigned status_flags, char *result, unsigned maxlen)
2513 {
2514         struct dm_writecache *wc = ti->private;
2515         unsigned extra_args;
2516         unsigned sz = 0;
2517
2518         switch (type) {
2519         case STATUSTYPE_INFO:
2520                 DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
2521                        (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
2522                        (unsigned long long)wc->writeback_size);
2523                 break;
2524         case STATUSTYPE_TABLE:
2525                 DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
2526                                 wc->dev->name, wc->ssd_dev->name, wc->block_size);
2527                 extra_args = 0;
2528                 if (wc->start_sector_set)
2529                         extra_args += 2;
2530                 if (wc->high_wm_percent_set)
2531                         extra_args += 2;
2532                 if (wc->low_wm_percent_set)
2533                         extra_args += 2;
2534                 if (wc->max_writeback_jobs_set)
2535                         extra_args += 2;
2536                 if (wc->autocommit_blocks_set)
2537                         extra_args += 2;
2538                 if (wc->autocommit_time_set)
2539                         extra_args += 2;
2540                 if (wc->max_age_set)
2541                         extra_args += 2;
2542                 if (wc->cleaner_set)
2543                         extra_args++;
2544                 if (wc->writeback_fua_set)
2545                         extra_args++;
2546
2547                 DMEMIT("%u", extra_args);
2548                 if (wc->start_sector_set)
2549                         DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
2550                 if (wc->high_wm_percent_set)
2551                         DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
2552                 if (wc->low_wm_percent_set)
2553                         DMEMIT(" low_watermark %u", wc->low_wm_percent_value);
2554                 if (wc->max_writeback_jobs_set)
2555                         DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
2556                 if (wc->autocommit_blocks_set)
2557                         DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
2558                 if (wc->autocommit_time_set)
2559                         DMEMIT(" autocommit_time %u", wc->autocommit_time_value);
2560                 if (wc->max_age_set)
2561                         DMEMIT(" max_age %u", wc->max_age_value);
2562                 if (wc->cleaner_set)
2563                         DMEMIT(" cleaner");
2564                 if (wc->writeback_fua_set)
2565                         DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
2566                 break;
2567         }
2568 }
2569
2570 static struct target_type writecache_target = {
2571         .name                   = "writecache",
2572         .version                = {1, 4, 0},
2573         .module                 = THIS_MODULE,
2574         .ctr                    = writecache_ctr,
2575         .dtr                    = writecache_dtr,
2576         .status                 = writecache_status,
2577         .postsuspend            = writecache_suspend,
2578         .resume                 = writecache_resume,
2579         .message                = writecache_message,
2580         .map                    = writecache_map,
2581         .end_io                 = writecache_end_io,
2582         .iterate_devices        = writecache_iterate_devices,
2583         .io_hints               = writecache_io_hints,
2584 };
2585
2586 static int __init dm_writecache_init(void)
2587 {
2588         int r;
2589
2590         r = dm_register_target(&writecache_target);
2591         if (r < 0) {
2592                 DMERR("register failed %d", r);
2593                 return r;
2594         }
2595
2596         return 0;
2597 }
2598
2599 static void __exit dm_writecache_exit(void)
2600 {
2601         dm_unregister_target(&writecache_target);
2602 }
2603
2604 module_init(dm_writecache_init);
2605 module_exit(dm_writecache_exit);
2606
2607 MODULE_DESCRIPTION(DM_NAME " writecache target");
2608 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2609 MODULE_LICENSE("GPL");