2 * Copyright (C) 2009-2011 Red Hat, Inc.
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
6 * This file is released under the GPL.
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/jiffies.h>
15 #include <linux/vmalloc.h>
16 #include <linux/shrinker.h>
17 #include <linux/module.h>
18 #include <linux/rbtree.h>
19 #include <linux/stacktrace.h>
21 #define DM_MSG_PREFIX "bufio"
24 * Memory management policy:
25 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
26 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
27 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
28 * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT dirty buffers.
31 #define DM_BUFIO_MIN_BUFFERS 8
33 #define DM_BUFIO_MEMORY_PERCENT 2
34 #define DM_BUFIO_VMALLOC_PERCENT 25
35 #define DM_BUFIO_WRITEBACK_PERCENT 75
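/*
 * For illustration only (hypothetical machine, not part of the driver):
 * on a 64-bit box with 8 GiB of RAM, DM_BUFIO_MEMORY_PERCENT caps the
 * total cache at roughly 2% of 8 GiB, i.e. about 164 MiB, and since 25%
 * of the (huge) 64-bit vmalloc space is far larger, the RAM-based limit
 * is the one that applies. Background writeback for a client kicks in
 * once its dirty buffers exceed 75% of that client's share of the limit.
 */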
38 * Check buffer ages in this interval (seconds)
40 #define DM_BUFIO_WORK_TIMER_SECS 30
43 * Free buffers when they are older than this (seconds)
45 #define DM_BUFIO_DEFAULT_AGE_SECS 300
48 * The number of bytes of cached data to keep around.
50 #define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
53 * The number of bvec entries that are embedded directly in the buffer.
54 * If the chunk size is larger, dm-io is used to do the io.
56 #define DM_BUFIO_INLINE_VECS 16
59 * Don't try to use kmem_cache_alloc for blocks larger than this.
60 * For explanation, see alloc_buffer_data below.
62 #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
63 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))
66 * dm_buffer->list_mode
74 * All buffers are linked to the buffer_tree with their node field.
76 * Clean buffers that are not being written (B_WRITING not set)
77 * are linked to lru[LIST_CLEAN] with their lru_list field.
79 * Dirty and clean buffers that are being written are linked to
80 * lru[LIST_DIRTY] with their lru_list field. When the write
81 * finishes, the buffer cannot be relinked immediately (because we
82 * are in an interrupt context and relinking requires process
83 * context), so some clean-not-writing buffers can be held on
84 * dirty_lru too. They are later added to lru in the process context.
87 struct dm_bufio_client {
90 struct list_head lru[LIST_SIZE];
91 unsigned long n_buffers[LIST_SIZE];
93 struct block_device *bdev;
95 unsigned char sectors_per_block_bits;
96 unsigned char pages_per_block_bits;
97 unsigned char blocks_per_page_bits;
99 void (*alloc_callback)(struct dm_buffer *);
100 void (*write_callback)(struct dm_buffer *);
102 struct dm_io_client *dm_io;
104 struct list_head reserved_buffers;
105 unsigned need_reserved_buffers;
107 unsigned minimum_buffers;
109 struct rb_root buffer_tree;
110 wait_queue_head_t free_buffer_wait;
112 int async_write_error;
114 struct list_head client_list;
115 struct shrinker shrinker;
126 * Describes how the block was allocated:
127 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
128 * See the comment at alloc_buffer_data.
132 DATA_MODE_GET_FREE_PAGES = 1,
133 DATA_MODE_VMALLOC = 2,
139 struct list_head lru_list;
142 enum data_mode data_mode;
143 unsigned char list_mode; /* LIST_* */
148 unsigned long last_accessed;
149 struct dm_bufio_client *c;
150 struct list_head write_list;
152 struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
153 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
155 struct stack_trace stack_trace;
156 unsigned long stack_entries[MAX_STACK];
160 /*----------------------------------------------------------------*/
162 static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
163 static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
165 static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
167 unsigned ret = c->blocks_per_page_bits - 1;
169 BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
174 #define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
175 #define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)])
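/*
 * Example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): a client with
 * 512-byte blocks has blocks_per_block_bits... rather, blocks_per_page_bits == 3
 * and uses slab cache index 2; 1 KiB blocks map to index 1 and 2 KiB blocks
 * to index 0. Blocks of a full page or more have blocks_per_page_bits == 0
 * and never take the kmem_cache path (see alloc_buffer_data below).
 */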
177 #define dm_bufio_in_request() (!!current->bio_list)
179 static void dm_bufio_lock(struct dm_bufio_client *c)
181 mutex_lock_nested(&c->lock, dm_bufio_in_request());
184 static int dm_bufio_trylock(struct dm_bufio_client *c)
186 return mutex_trylock(&c->lock);
189 static void dm_bufio_unlock(struct dm_bufio_client *c)
191 mutex_unlock(&c->lock);
194 /*----------------------------------------------------------------*/
197 * Default cache size: available memory divided by the ratio.
199 static unsigned long dm_bufio_default_cache_size;
202 * Total cache size set by the user.
204 static unsigned long dm_bufio_cache_size;
207 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
208 * at any time. If it disagrees, the user has changed cache size.
210 static unsigned long dm_bufio_cache_size_latch;
212 static DEFINE_SPINLOCK(param_spinlock);
215 * Buffers are freed after this timeout
217 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
218 static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
220 static unsigned long dm_bufio_peak_allocated;
221 static unsigned long dm_bufio_allocated_kmem_cache;
222 static unsigned long dm_bufio_allocated_get_free_pages;
223 static unsigned long dm_bufio_allocated_vmalloc;
224 static unsigned long dm_bufio_current_allocated;
226 /*----------------------------------------------------------------*/
229 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
231 static unsigned long dm_bufio_cache_size_per_client;
234 * The current number of clients.
236 static int dm_bufio_client_count;
239 * The list of all clients.
241 static LIST_HEAD(dm_bufio_all_clients);
244 * This mutex protects dm_bufio_cache_size_latch,
245 * dm_bufio_cache_size_per_client and dm_bufio_client_count
247 static DEFINE_MUTEX(dm_bufio_clients_lock);
249 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
250 static void buffer_record_stack(struct dm_buffer *b)
252 b->stack_trace.nr_entries = 0;
253 b->stack_trace.max_entries = MAX_STACK;
254 b->stack_trace.entries = b->stack_entries;
255 b->stack_trace.skip = 2;
256 save_stack_trace(&b->stack_trace);
260 /*----------------------------------------------------------------
261 * A red/black tree acts as an index for all the buffers.
262 *--------------------------------------------------------------*/
263 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
265 struct rb_node *n = c->buffer_tree.rb_node;
269 b = container_of(n, struct dm_buffer, node);
271 if (b->block == block)
274 n = (b->block < block) ? n->rb_right : n->rb_left;
280 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
282 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
283 struct dm_buffer *found;
286 found = container_of(*new, struct dm_buffer, node);
288 if (found->block == b->block) {
294 new = (found->block < b->block) ?
295 &((*new)->rb_right) : &((*new)->rb_left);
298 rb_link_node(&b->node, parent, new);
299 rb_insert_color(&b->node, &c->buffer_tree);
302 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
304 rb_erase(&b->node, &c->buffer_tree);
307 /*----------------------------------------------------------------*/
309 static void adjust_total_allocated(enum data_mode data_mode, long diff)
311 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
312 &dm_bufio_allocated_kmem_cache,
313 &dm_bufio_allocated_get_free_pages,
314 &dm_bufio_allocated_vmalloc,
317 spin_lock(&param_spinlock);
319 *class_ptr[data_mode] += diff;
321 dm_bufio_current_allocated += diff;
323 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
324 dm_bufio_peak_allocated = dm_bufio_current_allocated;
326 spin_unlock(&param_spinlock);
330 * Change the number of clients and recalculate per-client limit.
332 static void __cache_size_refresh(void)
334 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
335 BUG_ON(dm_bufio_client_count < 0);
337 dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
340 * Use default if set to 0 and report the actual cache size used.
342 if (!dm_bufio_cache_size_latch) {
343 (void)cmpxchg(&dm_bufio_cache_size, 0,
344 dm_bufio_default_cache_size);
345 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
348 dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
349 (dm_bufio_client_count ? : 1);
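/*
 * A quick sanity example (hypothetical numbers): with dm_bufio_cache_size
 * set to 100 MiB and four registered clients, each client is allowed
 * roughly 25 MiB of cached data. The "?: 1" above avoids a division by
 * zero while no client is registered yet.
 */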
353 * Allocating buffer data.
355 * Small buffers are allocated with kmem_cache, to use space optimally.
357 * For large buffers, we choose between get_free_pages and vmalloc.
358 * Each has advantages and disadvantages.
360 * __get_free_pages can randomly fail if the memory is fragmented.
361 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
362 * as low as 128M) so using it for caching is not appropriate.
364 * If the allocation may fail we use __get_free_pages. Memory fragmentation
365 * won't have a fatal effect here, but it just causes flushes of some other
366 * buffers and more I/O will be performed. Don't use __get_free_pages if it
367 * always fails (i.e. order >= MAX_ORDER).
369 * If the allocation shouldn't fail we use __vmalloc. This is only for the
370 * initial reserve allocation, so there's no risk of wasting all vmalloc space.
373 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
374 enum data_mode *data_mode)
376 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
377 *data_mode = DATA_MODE_SLAB;
378 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
381 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
382 gfp_mask & __GFP_NORETRY) {
383 *data_mode = DATA_MODE_GET_FREE_PAGES;
384 return (void *)__get_free_pages(gfp_mask,
385 c->pages_per_block_bits);
388 *data_mode = DATA_MODE_VMALLOC;
391 * __vmalloc allocates the data pages and auxiliary structures with
392 * gfp_flags that were specified, but pagetables are always allocated
393 * with GFP_KERNEL, no matter what was specified as gfp_mask.
395 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
396 * all allocations done by this process (including pagetables) are done
397 * as if GFP_NOIO was specified.
399 if (gfp_mask & __GFP_NORETRY) {
400 unsigned noio_flag = memalloc_noio_save();
401 void *ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM,
404 memalloc_noio_restore(noio_flag);
408 return __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
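/*
 * To make the allocation strategy above concrete (assuming 4 KiB pages
 * and MAX_ORDER == 11, as on common x86-64 configurations): blocks up to
 * 2 KiB come from a kmem_cache, blocks up to 4 MiB may be tried with
 * __get_free_pages when __GFP_NORETRY is set, and everything else
 * (including the must-not-fail reserve allocations) falls back to
 * __vmalloc.
 */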
412 * Free buffer's data.
414 static void free_buffer_data(struct dm_bufio_client *c,
415 void *data, enum data_mode data_mode)
419 kmem_cache_free(DM_BUFIO_CACHE(c), data);
422 case DATA_MODE_GET_FREE_PAGES:
423 free_pages((unsigned long)data, c->pages_per_block_bits);
426 case DATA_MODE_VMALLOC:
431 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
438 * Allocate buffer and its data.
440 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
442 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
450 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
456 adjust_total_allocated(b->data_mode, (long)c->block_size);
458 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
459 memset(&b->stack_trace, 0, sizeof(b->stack_trace));
465 * Free buffer and its data.
467 static void free_buffer(struct dm_buffer *b)
469 struct dm_bufio_client *c = b->c;
471 adjust_total_allocated(b->data_mode, -(long)c->block_size);
473 free_buffer_data(c, b->data, b->data_mode);
478 * Link buffer to the buffer tree and the clean or dirty queue.
480 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
482 struct dm_bufio_client *c = b->c;
484 c->n_buffers[dirty]++;
486 b->list_mode = dirty;
487 list_add(&b->lru_list, &c->lru[dirty]);
489 b->last_accessed = jiffies;
493 * Unlink buffer from the buffer tree and the dirty or clean queue.
495 static void __unlink_buffer(struct dm_buffer *b)
497 struct dm_bufio_client *c = b->c;
499 BUG_ON(!c->n_buffers[b->list_mode]);
501 c->n_buffers[b->list_mode]--;
503 list_del(&b->lru_list);
507 * Place the buffer at the head of the dirty or clean LRU queue.
509 static void __relink_lru(struct dm_buffer *b, int dirty)
511 struct dm_bufio_client *c = b->c;
513 BUG_ON(!c->n_buffers[b->list_mode]);
515 c->n_buffers[b->list_mode]--;
516 c->n_buffers[dirty]++;
517 b->list_mode = dirty;
518 list_move(&b->lru_list, &c->lru[dirty]);
519 b->last_accessed = jiffies;
522 /*----------------------------------------------------------------
523 * Submit I/O on the buffer.
525 * Bio interface is faster but it has some problems:
526 * the vector list is limited (increasing this limit increases
527 * memory-consumption per buffer, so it is not viable);
529 * the memory must be direct-mapped, not vmalloced;
531 * the I/O driver can reject requests spuriously if it thinks that
532 * the requests are too big for the device or if they cross a
533 * controller-defined memory boundary.
535 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
536 * it is not vmalloced, try using the bio interface.
538 * If the buffer is big, if it is vmalloced or if the underlying device
539 * rejects the bio because it is too large, use dm-io layer to do the I/O.
540 * The dm-io layer splits the I/O into multiple requests, avoiding the above shortcomings.
542 *--------------------------------------------------------------*/
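/*
 * Concretely (assuming 4 KiB pages): with DM_BUFIO_INLINE_VECS == 16,
 * any buffer of up to 64 KiB whose data is not vmalloced is submitted
 * through the embedded bio; vmalloced data or larger blocks always go
 * through dm-io (see submit_io below).
 */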
545 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
546 * that the request was handled directly with bio interface.
548 static void dmio_complete(unsigned long error, void *context)
550 struct dm_buffer *b = context;
552 b->bio.bi_error = error ? -EIO : 0;
553 b->bio.bi_end_io(&b->bio);
556 static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
557 bio_end_io_t *end_io)
560 struct dm_io_request io_req = {
563 .notify.fn = dmio_complete,
565 .client = b->c->dm_io,
567 struct dm_io_region region = {
569 .sector = block << b->c->sectors_per_block_bits,
570 .count = b->c->block_size >> SECTOR_SHIFT,
573 if (b->data_mode != DATA_MODE_VMALLOC) {
574 io_req.mem.type = DM_IO_KMEM;
575 io_req.mem.ptr.addr = b->data;
577 io_req.mem.type = DM_IO_VMA;
578 io_req.mem.ptr.vma = b->data;
581 b->bio.bi_end_io = end_io;
583 r = dm_io(&io_req, 1, &region, NULL);
590 static void inline_endio(struct bio *bio)
592 bio_end_io_t *end_fn = bio->bi_private;
593 int error = bio->bi_error;
596 * Reset the bio to free any attached resources
597 * (e.g. bio integrity profiles).
601 bio->bi_error = error;
605 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
606 bio_end_io_t *end_io)
612 b->bio.bi_io_vec = b->bio_vec;
613 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
614 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
615 b->bio.bi_bdev = b->c->bdev;
616 b->bio.bi_end_io = inline_endio;
618 * Use of .bi_private isn't a problem here because
619 * the dm_buffer's inline bio is local to bufio.
621 b->bio.bi_private = end_io;
622 bio_set_op_attrs(&b->bio, rw, 0);
625 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
626 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
629 len = b->c->block_size;
631 if (len >= PAGE_SIZE)
632 BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
634 BUG_ON((unsigned long)ptr & (len - 1));
637 if (!bio_add_page(&b->bio, virt_to_page(ptr),
638 len < PAGE_SIZE ? len : PAGE_SIZE,
639 offset_in_page(ptr))) {
640 BUG_ON(b->c->block_size <= PAGE_SIZE);
641 use_dmio(b, rw, block, end_io);
652 static void submit_io(struct dm_buffer *b, int rw, sector_t block,
653 bio_end_io_t *end_io)
655 if (rw == WRITE && b->c->write_callback)
656 b->c->write_callback(b);
658 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
659 b->data_mode != DATA_MODE_VMALLOC)
660 use_inline_bio(b, rw, block, end_io);
662 use_dmio(b, rw, block, end_io);
665 /*----------------------------------------------------------------
666 * Writing dirty buffers
667 *--------------------------------------------------------------*/
670 * The endio routine for write.
672 * Set the error, clear the B_WRITING bit and wake anyone who was waiting on the buffer.
675 static void write_endio(struct bio *bio)
677 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
679 b->write_error = bio->bi_error;
680 if (unlikely(bio->bi_error)) {
681 struct dm_bufio_client *c = b->c;
682 int error = bio->bi_error;
683 (void)cmpxchg(&c->async_write_error, 0, error);
686 BUG_ON(!test_bit(B_WRITING, &b->state));
688 smp_mb__before_atomic();
689 clear_bit(B_WRITING, &b->state);
690 smp_mb__after_atomic();
692 wake_up_bit(&b->state, B_WRITING);
696 * Initiate a write on a dirty buffer, but don't wait for it.
698 * - If the buffer is not dirty, exit.
699 * - If there is some previous write going on, wait for it to finish (we can't
700 * have two writes on the same buffer simultaneously).
701 * - Submit our write and don't wait on it. We set B_WRITING indicating
702 * that there is a write in progress.
704 static void __write_dirty_buffer(struct dm_buffer *b,
705 struct list_head *write_list)
707 if (!test_bit(B_DIRTY, &b->state))
710 clear_bit(B_DIRTY, &b->state);
711 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
714 submit_io(b, WRITE, b->block, write_endio);
716 list_add_tail(&b->write_list, write_list);
719 static void __flush_write_list(struct list_head *write_list)
721 struct blk_plug plug;
722 blk_start_plug(&plug);
723 while (!list_empty(write_list)) {
724 struct dm_buffer *b =
725 list_entry(write_list->next, struct dm_buffer, write_list);
726 list_del(&b->write_list);
727 submit_io(b, WRITE, b->block, write_endio);
730 blk_finish_plug(&plug);
734 * Wait until any activity on the buffer finishes. Possibly write the
735 * buffer if it is dirty. When this function finishes, there is no I/O
736 * running on the buffer and the buffer is not dirty.
738 static void __make_buffer_clean(struct dm_buffer *b)
740 BUG_ON(b->hold_count);
742 if (!b->state) /* fast case */
745 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
746 __write_dirty_buffer(b, NULL);
747 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
751 * Find some buffer that is not held by anybody, clean it, unlink it and return it.
754 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
758 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
759 BUG_ON(test_bit(B_WRITING, &b->state));
760 BUG_ON(test_bit(B_DIRTY, &b->state));
762 if (!b->hold_count) {
763 __make_buffer_clean(b);
770 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
771 BUG_ON(test_bit(B_READING, &b->state));
773 if (!b->hold_count) {
774 __make_buffer_clean(b);
785 * Wait until some other threads free some buffer or release hold count on another buffer.
788 * This function is entered with c->lock held, drops it and regains it before exiting.
791 static void __wait_for_free_buffer(struct dm_bufio_client *c)
793 DECLARE_WAITQUEUE(wait, current);
795 add_wait_queue(&c->free_buffer_wait, &wait);
796 set_task_state(current, TASK_UNINTERRUPTIBLE);
801 remove_wait_queue(&c->free_buffer_wait, &wait);
814 * Allocate a new buffer. If the allocation is not possible, wait until
815 * some other thread frees a buffer.
817 * May drop the lock and regain it.
819 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
822 bool tried_noio_alloc = false;
825 * dm-bufio is resistant to allocation failures (it just keeps
826 * one buffer reserved in case all the allocations fail).
827 * So set flags to not try too hard:
828 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
829 * mutex and wait ourselves.
830 * __GFP_NORETRY: don't retry and rather return failure
831 * __GFP_NOMEMALLOC: don't use emergency reserves
832 * __GFP_NOWARN: don't print a warning in case of failure
834 * For debugging, if we set the cache size to 1, no new buffers will be allocated.
838 if (dm_bufio_cache_size_latch != 1) {
839 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
844 if (nf == NF_PREFETCH)
847 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
849 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
853 tried_noio_alloc = true;
856 if (!list_empty(&c->reserved_buffers)) {
857 b = list_entry(c->reserved_buffers.next,
858 struct dm_buffer, lru_list);
859 list_del(&b->lru_list);
860 c->need_reserved_buffers++;
865 b = __get_unclaimed_buffer(c);
869 __wait_for_free_buffer(c);
873 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
875 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
880 if (c->alloc_callback)
881 c->alloc_callback(b);
887 * Free a buffer and wake other threads waiting for free buffers.
889 static void __free_buffer_wake(struct dm_buffer *b)
891 struct dm_bufio_client *c = b->c;
893 if (!c->need_reserved_buffers)
896 list_add(&b->lru_list, &c->reserved_buffers);
897 c->need_reserved_buffers--;
900 wake_up(&c->free_buffer_wait);
903 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
904 struct list_head *write_list)
906 struct dm_buffer *b, *tmp;
908 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
909 BUG_ON(test_bit(B_READING, &b->state));
911 if (!test_bit(B_DIRTY, &b->state) &&
912 !test_bit(B_WRITING, &b->state)) {
913 __relink_lru(b, LIST_CLEAN);
917 if (no_wait && test_bit(B_WRITING, &b->state))
920 __write_dirty_buffer(b, write_list);
926 * Get writeback threshold and buffer limit for a given client.
928 static void __get_memory_limit(struct dm_bufio_client *c,
929 unsigned long *threshold_buffers,
930 unsigned long *limit_buffers)
932 unsigned long buffers;
934 if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
935 if (mutex_trylock(&dm_bufio_clients_lock)) {
936 __cache_size_refresh();
937 mutex_unlock(&dm_bufio_clients_lock);
941 buffers = dm_bufio_cache_size_per_client >>
942 (c->sectors_per_block_bits + SECTOR_SHIFT);
944 if (buffers < c->minimum_buffers)
945 buffers = c->minimum_buffers;
947 *limit_buffers = buffers;
948 *threshold_buffers = mult_frac(buffers,
949 DM_BUFIO_WRITEBACK_PERCENT, 100);
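/*
 * Rough example (hypothetical figures): with a 100 MiB per-client share
 * and 4 KiB blocks, *limit_buffers comes out to 25600 buffers and
 * *threshold_buffers to 75% of that, i.e. 19200. __check_watermark
 * starts asynchronous writeback once the dirty count passes the latter
 * and starts evicting once the total count passes the former.
 */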
953 * Check if we're over watermark.
954 * If we are over threshold_buffers, start freeing buffers.
955 * If we're over "limit_buffers", block until we get under the limit.
957 static void __check_watermark(struct dm_bufio_client *c,
958 struct list_head *write_list)
960 unsigned long threshold_buffers, limit_buffers;
962 __get_memory_limit(c, &threshold_buffers, &limit_buffers);
964 while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
967 struct dm_buffer *b = __get_unclaimed_buffer(c);
972 __free_buffer_wake(b);
976 if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
977 __write_dirty_buffers_async(c, 1, write_list);
980 /*----------------------------------------------------------------
982 *--------------------------------------------------------------*/
984 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
985 enum new_flag nf, int *need_submit,
986 struct list_head *write_list)
988 struct dm_buffer *b, *new_b = NULL;
992 b = __find(c, block);
999 new_b = __alloc_buffer_wait(c, nf);
1004 * We've had a period where the mutex was unlocked, so need to
1005 * recheck the buffer tree.
1007 b = __find(c, block);
1009 __free_buffer_wake(new_b);
1013 __check_watermark(c, write_list);
1019 __link_buffer(b, block, LIST_CLEAN);
1021 if (nf == NF_FRESH) {
1026 b->state = 1 << B_READING;
1032 if (nf == NF_PREFETCH)
1035 * Note: it is essential that we don't wait for the buffer to be
1036 * read if dm_bufio_get function is used. Both dm_bufio_get and
1037 * dm_bufio_prefetch can be used in the driver request routine.
1038 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1039 * the same buffer, it would deadlock if we waited.
1041 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1045 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1046 test_bit(B_WRITING, &b->state));
1051 * The endio routine for reading: set the error, clear the bit and wake up
1052 * anyone waiting on the buffer.
1054 static void read_endio(struct bio *bio)
1056 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
1058 b->read_error = bio->bi_error;
1060 BUG_ON(!test_bit(B_READING, &b->state));
1062 smp_mb__before_atomic();
1063 clear_bit(B_READING, &b->state);
1064 smp_mb__after_atomic();
1066 wake_up_bit(&b->state, B_READING);
1070 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1071 * functions is similar except that dm_bufio_new doesn't read the
1072 * buffer from the disk (assuming that the caller overwrites all the data
1073 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1075 static void *new_read(struct dm_bufio_client *c, sector_t block,
1076 enum new_flag nf, struct dm_buffer **bp)
1079 struct dm_buffer *b;
1081 LIST_HEAD(write_list);
1084 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1085 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1086 if (b && b->hold_count == 1)
1087 buffer_record_stack(b);
1091 __flush_write_list(&write_list);
1097 submit_io(b, READ, b->block, read_endio);
1099 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1101 if (b->read_error) {
1102 int error = b->read_error;
1104 dm_bufio_release(b);
1106 return ERR_PTR(error);
1114 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1115 struct dm_buffer **bp)
1117 return new_read(c, block, NF_GET, bp);
1119 EXPORT_SYMBOL_GPL(dm_bufio_get);
1121 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1122 struct dm_buffer **bp)
1124 BUG_ON(dm_bufio_in_request());
1126 return new_read(c, block, NF_READ, bp);
1128 EXPORT_SYMBOL_GPL(dm_bufio_read);
1130 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1131 struct dm_buffer **bp)
1133 BUG_ON(dm_bufio_in_request());
1135 return new_read(c, block, NF_FRESH, bp);
1137 EXPORT_SYMBOL_GPL(dm_bufio_new);
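/*
 * A minimal usage sketch (illustrative only; "my_block", "new_contents"
 * and the error handling style are made up, but the calls are the
 * exported API above and below, used in the usual order):
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(client, my_block, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memcpy(data, new_contents, dm_bufio_get_block_size(client));
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 *	return dm_bufio_write_dirty_buffers(client);
 */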
1139 void dm_bufio_prefetch(struct dm_bufio_client *c,
1140 sector_t block, unsigned n_blocks)
1142 struct blk_plug plug;
1144 LIST_HEAD(write_list);
1146 BUG_ON(dm_bufio_in_request());
1148 blk_start_plug(&plug);
1151 for (; n_blocks--; block++) {
1153 struct dm_buffer *b;
1154 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1156 if (unlikely(!list_empty(&write_list))) {
1158 blk_finish_plug(&plug);
1159 __flush_write_list(&write_list);
1160 blk_start_plug(&plug);
1163 if (unlikely(b != NULL)) {
1167 submit_io(b, READ, b->block, read_endio);
1168 dm_bufio_release(b);
1181 blk_finish_plug(&plug);
1183 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1185 void dm_bufio_release(struct dm_buffer *b)
1187 struct dm_bufio_client *c = b->c;
1191 BUG_ON(!b->hold_count);
1194 if (!b->hold_count) {
1195 wake_up(&c->free_buffer_wait);
1198 * If there were errors on the buffer, and the buffer is not
1199 * to be written, free the buffer. There is no point in caching an invalid buffer.
1202 if ((b->read_error || b->write_error) &&
1203 !test_bit(B_READING, &b->state) &&
1204 !test_bit(B_WRITING, &b->state) &&
1205 !test_bit(B_DIRTY, &b->state)) {
1207 __free_buffer_wake(b);
1213 EXPORT_SYMBOL_GPL(dm_bufio_release);
1215 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1217 struct dm_bufio_client *c = b->c;
1221 BUG_ON(test_bit(B_READING, &b->state));
1223 if (!test_and_set_bit(B_DIRTY, &b->state))
1224 __relink_lru(b, LIST_DIRTY);
1228 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1230 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1232 LIST_HEAD(write_list);
1234 BUG_ON(dm_bufio_in_request());
1237 __write_dirty_buffers_async(c, 0, &write_list);
1239 __flush_write_list(&write_list);
1241 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1244 * For performance, it is essential that the buffers are written asynchronously
1245 * and simultaneously (so that the block layer can merge the writes) and then waited upon.
1248 * Finally, we flush hardware disk cache.
1250 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1253 unsigned long buffers_processed = 0;
1254 struct dm_buffer *b, *tmp;
1256 LIST_HEAD(write_list);
1259 __write_dirty_buffers_async(c, 0, &write_list);
1261 __flush_write_list(&write_list);
1265 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1266 int dropped_lock = 0;
1268 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1269 buffers_processed++;
1271 BUG_ON(test_bit(B_READING, &b->state));
1273 if (test_bit(B_WRITING, &b->state)) {
1274 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1278 wait_on_bit_io(&b->state, B_WRITING,
1279 TASK_UNINTERRUPTIBLE);
1283 wait_on_bit_io(&b->state, B_WRITING,
1284 TASK_UNINTERRUPTIBLE);
1287 if (!test_bit(B_DIRTY, &b->state) &&
1288 !test_bit(B_WRITING, &b->state))
1289 __relink_lru(b, LIST_CLEAN);
1294 * If we dropped the lock, the list is no longer consistent,
1295 * so we must restart the search.
1297 * In the most common case, the buffer just processed is
1298 * relinked to the clean list, so we won't loop scanning the
1299 * same buffer again and again.
1301 * This may livelock if there is another thread simultaneously
1302 * dirtying buffers, so we count the number of buffers walked
1303 * and if it exceeds the total number of buffers, it means that
1304 * someone is doing some writes simultaneously with us. In
1305 * this case, stop, dropping the lock.
1310 wake_up(&c->free_buffer_wait);
1313 a = xchg(&c->async_write_error, 0);
1314 f = dm_bufio_issue_flush(c);
1320 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1323 * Use dm-io to send an empty barrier to flush the device.
1325 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1327 struct dm_io_request io_req = {
1328 .bi_op = REQ_OP_WRITE,
1329 .bi_op_flags = WRITE_FLUSH,
1330 .mem.type = DM_IO_KMEM,
1331 .mem.ptr.addr = NULL,
1334 struct dm_io_region io_reg = {
1340 BUG_ON(dm_bufio_in_request());
1342 return dm_io(&io_req, 1, &io_reg, NULL);
1344 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1347 * We first delete any other buffer that may be at that new location.
1349 * Then, we write the buffer to the original location if it was dirty.
1351 * Then, if we are the only one who is holding the buffer, relink the buffer
1352 * in the buffer tree at the new location.
1354 * If there was someone else holding the buffer, we write it to the new
1355 * location but do not relink it, because that other user needs to have the buffer
1356 * at the same place.
1358 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1360 struct dm_bufio_client *c = b->c;
1361 struct dm_buffer *new;
1363 BUG_ON(dm_bufio_in_request());
1368 new = __find(c, new_block);
1370 if (new->hold_count) {
1371 __wait_for_free_buffer(c);
1376 * FIXME: Is there any point waiting for a write that's going
1377 * to be overwritten in a bit?
1379 __make_buffer_clean(new);
1380 __unlink_buffer(new);
1381 __free_buffer_wake(new);
1384 BUG_ON(!b->hold_count);
1385 BUG_ON(test_bit(B_READING, &b->state));
1387 __write_dirty_buffer(b, NULL);
1388 if (b->hold_count == 1) {
1389 wait_on_bit_io(&b->state, B_WRITING,
1390 TASK_UNINTERRUPTIBLE);
1391 set_bit(B_DIRTY, &b->state);
1393 __link_buffer(b, new_block, LIST_DIRTY);
1396 wait_on_bit_lock_io(&b->state, B_WRITING,
1397 TASK_UNINTERRUPTIBLE);
1399 * Relink buffer to "new_block" so that write_callback
1400 * sees "new_block" as a block number.
1401 * After the write, link the buffer back to old_block.
1402 * All this must be done in bufio lock, so that block number
1403 * change isn't visible to other threads.
1405 old_block = b->block;
1407 __link_buffer(b, new_block, b->list_mode);
1408 submit_io(b, WRITE, new_block, write_endio);
1409 wait_on_bit_io(&b->state, B_WRITING,
1410 TASK_UNINTERRUPTIBLE);
1412 __link_buffer(b, old_block, b->list_mode);
1416 dm_bufio_release(b);
1418 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1421 * Free the given buffer.
1423 * This is just a hint; if the buffer is in use or dirty, this function does nothing.
1426 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1428 struct dm_buffer *b;
1432 b = __find(c, block);
1433 if (b && likely(!b->hold_count) && likely(!b->state)) {
1435 __free_buffer_wake(b);
1440 EXPORT_SYMBOL(dm_bufio_forget);
1442 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1444 c->minimum_buffers = n;
1446 EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
1448 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1450 return c->block_size;
1452 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1454 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1456 return i_size_read(c->bdev->bd_inode) >>
1457 (SECTOR_SHIFT + c->sectors_per_block_bits);
1459 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1461 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1465 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1467 void *dm_bufio_get_block_data(struct dm_buffer *b)
1471 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1473 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1477 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1479 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1483 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1485 static void drop_buffers(struct dm_bufio_client *c)
1487 struct dm_buffer *b;
1489 bool warned = false;
1491 BUG_ON(dm_bufio_in_request());
1494 * An optimization so that the buffers are not written one-by-one.
1496 dm_bufio_write_dirty_buffers_async(c);
1500 while ((b = __get_unclaimed_buffer(c)))
1501 __free_buffer_wake(b);
1503 for (i = 0; i < LIST_SIZE; i++)
1504 list_for_each_entry(b, &c->lru[i], lru_list) {
1507 DMERR("leaked buffer %llx, hold count %u, list %d",
1508 (unsigned long long)b->block, b->hold_count, i);
1509 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1510 print_stack_trace(&b->stack_trace, 1);
1511 b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
1515 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1516 while ((b = __get_unclaimed_buffer(c)))
1517 __free_buffer_wake(b);
1520 for (i = 0; i < LIST_SIZE; i++)
1521 BUG_ON(!list_empty(&c->lru[i]));
1527 * We may not be able to evict this buffer if IO is pending or the client
1528 * is still using it. Caller is expected to know buffer is too old.
1530 * And if GFP_NOFS is used, we must not do any I/O because we hold
1531 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1532 * rerouted to a different bufio client.
1534 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1536 if (!(gfp & __GFP_FS)) {
1537 if (test_bit(B_READING, &b->state) ||
1538 test_bit(B_WRITING, &b->state) ||
1539 test_bit(B_DIRTY, &b->state))
1546 __make_buffer_clean(b);
1548 __free_buffer_wake(b);
1553 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1555 unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
1556 return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
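/*
 * With the default DM_BUFIO_DEFAULT_RETAIN_BYTES of 256 KiB and, say,
 * 4 KiB blocks, this works out to 64 buffers that the shrinker and the
 * ageing work try to keep cached per client (an illustrative figure;
 * retain_bytes is tunable at runtime via the module parameter).
 */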
1559 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1563 struct dm_buffer *b, *tmp;
1564 unsigned long freed = 0;
1565 unsigned long count = c->n_buffers[LIST_CLEAN] +
1566 c->n_buffers[LIST_DIRTY];
1567 unsigned long retain_target = get_retain_buffers(c);
1569 for (l = 0; l < LIST_SIZE; l++) {
1570 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1571 if (__try_evict_buffer(b, gfp_mask))
1573 if (!--nr_to_scan || ((count - freed) <= retain_target))
1581 static unsigned long
1582 dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1584 struct dm_bufio_client *c;
1585 unsigned long freed;
1587 c = container_of(shrink, struct dm_bufio_client, shrinker);
1588 if (sc->gfp_mask & __GFP_FS)
1590 else if (!dm_bufio_trylock(c))
1593 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1598 static unsigned long
1599 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1601 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1602 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1603 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1604 unsigned long retain_target = get_retain_buffers(c);
1606 return (count < retain_target) ? 0 : (count - retain_target);
1610 * Create the buffering interface
1612 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1613 unsigned reserved_buffers, unsigned aux_size,
1614 void (*alloc_callback)(struct dm_buffer *),
1615 void (*write_callback)(struct dm_buffer *))
1618 struct dm_bufio_client *c;
1621 BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1622 (block_size & (block_size - 1)));
1624 c = kzalloc(sizeof(*c), GFP_KERNEL);
1629 c->buffer_tree = RB_ROOT;
1632 c->block_size = block_size;
1633 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1634 c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
1635 __ffs(block_size) - PAGE_SHIFT : 0;
1636 c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
1637 PAGE_SHIFT - __ffs(block_size) : 0);
1639 c->aux_size = aux_size;
1640 c->alloc_callback = alloc_callback;
1641 c->write_callback = write_callback;
1643 for (i = 0; i < LIST_SIZE; i++) {
1644 INIT_LIST_HEAD(&c->lru[i]);
1645 c->n_buffers[i] = 0;
1648 mutex_init(&c->lock);
1649 INIT_LIST_HEAD(&c->reserved_buffers);
1650 c->need_reserved_buffers = reserved_buffers;
1652 c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1654 init_waitqueue_head(&c->free_buffer_wait);
1655 c->async_write_error = 0;
1657 c->dm_io = dm_io_client_create();
1658 if (IS_ERR(c->dm_io)) {
1659 r = PTR_ERR(c->dm_io);
1663 mutex_lock(&dm_bufio_clients_lock);
1664 if (c->blocks_per_page_bits) {
1665 if (!DM_BUFIO_CACHE_NAME(c)) {
1666 DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1667 if (!DM_BUFIO_CACHE_NAME(c)) {
1669 mutex_unlock(&dm_bufio_clients_lock);
1674 if (!DM_BUFIO_CACHE(c)) {
1675 DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1677 c->block_size, 0, NULL);
1678 if (!DM_BUFIO_CACHE(c)) {
1680 mutex_unlock(&dm_bufio_clients_lock);
1685 mutex_unlock(&dm_bufio_clients_lock);
1687 while (c->need_reserved_buffers) {
1688 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1694 __free_buffer_wake(b);
1697 mutex_lock(&dm_bufio_clients_lock);
1698 dm_bufio_client_count++;
1699 list_add(&c->client_list, &dm_bufio_all_clients);
1700 __cache_size_refresh();
1701 mutex_unlock(&dm_bufio_clients_lock);
1703 c->shrinker.count_objects = dm_bufio_shrink_count;
1704 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1705 c->shrinker.seeks = 1;
1706 c->shrinker.batch = 0;
1707 register_shrinker(&c->shrinker);
1713 while (!list_empty(&c->reserved_buffers)) {
1714 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1715 struct dm_buffer, lru_list);
1716 list_del(&b->lru_list);
1719 dm_io_client_destroy(c->dm_io);
1725 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
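/*
 * Illustrative only (the block size and the lack of callbacks are
 * hypothetical choices): a dm target would typically set up and tear
 * down a client like this:
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */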
1728 * Free the buffering interface.
1729 * It is required that there are no references on any buffers.
1731 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1737 unregister_shrinker(&c->shrinker);
1739 mutex_lock(&dm_bufio_clients_lock);
1741 list_del(&c->client_list);
1742 dm_bufio_client_count--;
1743 __cache_size_refresh();
1745 mutex_unlock(&dm_bufio_clients_lock);
1747 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1748 BUG_ON(c->need_reserved_buffers);
1750 while (!list_empty(&c->reserved_buffers)) {
1751 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1752 struct dm_buffer, lru_list);
1753 list_del(&b->lru_list);
1757 for (i = 0; i < LIST_SIZE; i++)
1758 if (c->n_buffers[i])
1759 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1761 for (i = 0; i < LIST_SIZE; i++)
1762 BUG_ON(c->n_buffers[i]);
1764 dm_io_client_destroy(c->dm_io);
1767 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1769 static unsigned get_max_age_hz(void)
1771 unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
1773 if (max_age > UINT_MAX / HZ)
1774 max_age = UINT_MAX / HZ;
1776 return max_age * HZ;
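/*
 * For example, with the default dm_bufio_max_age of 300 seconds and
 * HZ == 250 this returns 75000 jiffies; the clamp above only matters
 * for absurdly large values written to the max_age_seconds parameter,
 * where max_age * HZ would otherwise overflow.
 */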
1779 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1781 return time_after_eq(jiffies, b->last_accessed + age_hz);
1784 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1786 struct dm_buffer *b, *tmp;
1787 unsigned long retain_target = get_retain_buffers(c);
1788 unsigned long count;
1789 LIST_HEAD(write_list);
1793 __check_watermark(c, &write_list);
1794 if (unlikely(!list_empty(&write_list))) {
1796 __flush_write_list(&write_list);
1800 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1801 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1802 if (count <= retain_target)
1805 if (!older_than(b, age_hz))
1808 if (__try_evict_buffer(b, 0))
1817 static void cleanup_old_buffers(void)
1819 unsigned long max_age_hz = get_max_age_hz();
1820 struct dm_bufio_client *c;
1822 mutex_lock(&dm_bufio_clients_lock);
1824 __cache_size_refresh();
1826 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
1827 __evict_old_buffers(c, max_age_hz);
1829 mutex_unlock(&dm_bufio_clients_lock);
1832 static struct workqueue_struct *dm_bufio_wq;
1833 static struct delayed_work dm_bufio_work;
1835 static void work_fn(struct work_struct *w)
1837 cleanup_old_buffers();
1839 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1840 DM_BUFIO_WORK_TIMER_SECS * HZ);
1843 /*----------------------------------------------------------------
1845 *--------------------------------------------------------------*/
1848 * This is called only once for the whole dm_bufio module.
1849 * It initializes the memory limit.
1851 static int __init dm_bufio_init(void)
1855 dm_bufio_allocated_kmem_cache = 0;
1856 dm_bufio_allocated_get_free_pages = 0;
1857 dm_bufio_allocated_vmalloc = 0;
1858 dm_bufio_current_allocated = 0;
1860 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1861 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1863 mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
1864 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
1866 if (mem > ULONG_MAX)
1870 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
1871 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
1874 dm_bufio_default_cache_size = mem;
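/*
 * As a ballpark (hypothetical machines): on 64-bit with 8 GiB of RAM and
 * no highmem this yields a default of roughly 164 MiB; on a 32-bit
 * machine the 25% vmalloc cap above (e.g. 32 MiB out of a 128 MiB
 * vmalloc area) can be the binding limit instead.
 */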
1876 mutex_lock(&dm_bufio_clients_lock);
1877 __cache_size_refresh();
1878 mutex_unlock(&dm_bufio_clients_lock);
1880 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
1884 INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1885 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1886 DM_BUFIO_WORK_TIMER_SECS * HZ);
1892 * This is called once when unloading the dm_bufio module.
1894 static void __exit dm_bufio_exit(void)
1899 cancel_delayed_work_sync(&dm_bufio_work);
1900 destroy_workqueue(dm_bufio_wq);
1902 for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
1903 kmem_cache_destroy(dm_bufio_caches[i]);
1905 for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1906 kfree(dm_bufio_cache_names[i]);
1908 if (dm_bufio_client_count) {
1909 DMCRIT("%s: dm_bufio_client_count leaked: %d",
1910 __func__, dm_bufio_client_count);
1914 if (dm_bufio_current_allocated) {
1915 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1916 __func__, dm_bufio_current_allocated);
1920 if (dm_bufio_allocated_get_free_pages) {
1921 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1922 __func__, dm_bufio_allocated_get_free_pages);
1926 if (dm_bufio_allocated_vmalloc) {
1927 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1928 __func__, dm_bufio_allocated_vmalloc);
1935 module_init(dm_bufio_init)
1936 module_exit(dm_bufio_exit)
1938 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1939 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1941 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1942 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1944 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
1945 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
1947 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1948 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1950 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1951 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1953 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1954 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1956 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1957 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1959 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1960 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
1962 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1963 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1964 MODULE_LICENSE("GPL");