1 // SPDX-License-Identifier: GPL-2.0-only
3 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
5 * bitmap_create - sets up the bitmap structure
6 * bitmap_destroy - destroys the bitmap structure
8 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
9 * - added disk storage for bitmap
10 * - changes to allow various bitmap chunk sizes
16 * flush after a given percentage of bits is set, rather than just time based (maybe both).
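 *
 * In outline: the bitmap is kept at two levels.  In memory, a 16-bit
 * counter per "chunk" of the array tracks in-flight and recently
 * completed writes, with the counters grouped into pages that are
 * allocated on demand (and "hijacked" into the pointer itself if the
 * allocation fails).  On disk, in a file or next to the superblocks,
 * a single bit per chunk records whether that chunk may need resync.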
19 #include <linux/blkdev.h>
20 #include <linux/module.h>
21 #include <linux/errno.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/timer.h>
25 #include <linux/sched.h>
26 #include <linux/list.h>
27 #include <linux/file.h>
28 #include <linux/mount.h>
29 #include <linux/buffer_head.h>
30 #include <linux/seq_file.h>
31 #include <trace/events/block.h>
33 #include "md-bitmap.h"
35 static inline char *bmname(struct bitmap *bitmap)
37 return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
41 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
43 * 1) check to see if this page is allocated, if it's not then try to alloc
44 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
45 * page pointer directly as a counter
47 * if we find our page, we increment the page's refcount so that it stays
48 * allocated while we're using it
50 static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
51 unsigned long page, int create, int no_hijack)
52 __releases(bitmap->lock)
53 __acquires(bitmap->lock)
55 unsigned char *mappage;
57 if (page >= bitmap->pages) {
58 /* This can happen if bitmap_start_sync goes beyond
59 * the end of the device while looking for a whole page.
65 if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
68 if (bitmap->bp[page].map) /* page is already allocated, just return */
74 /* this page has not been allocated yet */
76 spin_unlock_irq(&bitmap->lock);
77 /* It is possible that this is being called inside a
78 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
79 * In general it is not permitted to sleep in that context as it
80 * can cause the loop to spin freely.
81 * That doesn't apply here as we can only reach this point
83 * When this function completes, either bp[page].map or
84 * bp[page].hijacked will be set. In either case, this function will
85 * abort before getting to this point again. So there is
86 * no risk of a free-spin, and so it is safe to assert
87 * that sleeping here is allowed.
89 sched_annotate_sleep();
90 mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
91 spin_lock_irq(&bitmap->lock);
93 if (mappage == NULL) {
94 pr_debug("md/bitmap: map page allocation failed, hijacking\n");
95 /* We don't support hijack for cluster raid */
98 /* failed - set the hijacked flag so that we can use the
99 * pointer as a counter */
100 if (!bitmap->bp[page].map)
101 bitmap->bp[page].hijacked = 1;
102 } else if (bitmap->bp[page].map ||
103 bitmap->bp[page].hijacked) {
104 /* somebody beat us to getting the page */
108 /* no page was in place and we have one, so install it */
110 bitmap->bp[page].map = mappage;
111 bitmap->missing_pages--;
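
/*
 * Note on the hijack fallback above: once bp[page].hijacked is set, no
 * counter array exists for that page.  Instead the ->map pointer field of
 * struct bitmap_page is reused as storage for two bitmap_counter_t values,
 * so every chunk in the page's range is tracked by one of just two coarse
 * counters (see how md_bitmap_get_counter() handles the hijacked case
 * further down).
 */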
116 /* if page is completely empty, put it back on the free list, or dealloc it */
117 /* if page was hijacked, unmark the flag so it might get alloced next time */
118 /* Note: lock should be held when calling this */
119 static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
123 if (bitmap->bp[page].count) /* page is still busy */
126 /* page is no longer in use, it can be released */
128 if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
129 bitmap->bp[page].hijacked = 0;
130 bitmap->bp[page].map = NULL;
132 /* normal case, free the page */
133 ptr = bitmap->bp[page].map;
134 bitmap->bp[page].map = NULL;
135 bitmap->missing_pages++;
141 * bitmap file handling - read and write the bitmap file and its superblock
145 * basic page I/O operations
148 /* IO operations when bitmap is stored near all superblocks */
149 static int read_sb_page(struct mddev *mddev, loff_t offset,
151 unsigned long index, int size)
153 /* choose a good rdev and read the page from there */
155 struct md_rdev *rdev;
158 rdev_for_each(rdev, mddev) {
159 if (! test_bit(In_sync, &rdev->flags)
160 || test_bit(Faulty, &rdev->flags)
161 || test_bit(Bitmap_sync, &rdev->flags))
164 target = offset + index * (PAGE_SIZE/512);
166 if (sync_page_io(rdev, target,
167 roundup(size, bdev_logical_block_size(rdev->bdev)),
168 page, REQ_OP_READ, 0, true)) {
176 static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
178 /* Iterate the disks of an mddev, using rcu to protect access to the
179 * linked list, and raising the refcount of devices we return to ensure
180 * they don't disappear while in use.
181 * As devices are only added or removed when raid_disk is < 0 and
182 * nr_pending is 0 and In_sync is clear, the entries we return will
183 * still be in the same position on the list when we re-enter
184 * list_for_each_entry_continue_rcu.
186 * Note that if entered with 'rdev == NULL' to start at the
187 * beginning, we temporarily assign 'rdev' to an address which
188 * isn't really an rdev, but which can be used by
189 * list_for_each_entry_continue_rcu() to find the first entry.
193 /* start at the beginning */
194 rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
196 /* release the previous rdev and start from there. */
197 rdev_dec_pending(rdev, mddev);
199 list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
200 if (rdev->raid_disk >= 0 &&
201 !test_bit(Faulty, &rdev->flags)) {
202 /* this is a usable device */
203 atomic_inc(&rdev->nr_pending);
212 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
214 struct md_rdev *rdev;
215 struct block_device *bdev;
216 struct mddev *mddev = bitmap->mddev;
217 struct bitmap_storage *store = &bitmap->storage;
221 while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
222 int size = PAGE_SIZE;
223 loff_t offset = mddev->bitmap_info.offset;
225 bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
227 if (page->index == store->file_pages-1) {
228 int last_page_size = store->bytes & (PAGE_SIZE-1);
229 if (last_page_size == 0)
230 last_page_size = PAGE_SIZE;
231 size = roundup(last_page_size,
232 bdev_logical_block_size(bdev));
234 /* Just make sure we aren't corrupting data or
237 if (mddev->external) {
238 /* Bitmap could be anywhere. */
239 if (rdev->sb_start + offset + (page->index
243 rdev->sb_start + offset
244 < (rdev->data_offset + mddev->dev_sectors
247 } else if (offset < 0) {
248 /* DATA BITMAP METADATA */
250 + (long)(page->index * (PAGE_SIZE/512))
252 /* bitmap runs into metadata */
254 if (rdev->data_offset + mddev->dev_sectors
255 > rdev->sb_start + offset)
256 /* data runs into bitmap */
258 } else if (rdev->sb_start < rdev->data_offset) {
259 /* METADATA BITMAP DATA */
262 + page->index*(PAGE_SIZE/512) + size/512
264 /* bitmap runs into data */
267 /* DATA METADATA BITMAP - no problems */
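			/*
			 * Taken together, the checks above ensure the bitmap
			 * page being written cannot overlap the data or the
			 * metadata, whichever of the three on-disk layouts
			 * (DATA|BITMAP|METADATA, METADATA|BITMAP|DATA, or
			 * DATA|METADATA|BITMAP) this rdev uses.
			 */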
269 md_super_write(mddev, rdev,
270 rdev->sb_start + offset
271 + page->index * (PAGE_SIZE/512),
276 if (wait && md_super_wait(mddev) < 0)
284 static void md_bitmap_file_kick(struct bitmap *bitmap);
286 * write out a page to a file
288 static void write_page(struct bitmap *bitmap, struct page *page, int wait)
290 struct buffer_head *bh;
292 if (bitmap->storage.file == NULL) {
293 switch (write_sb_page(bitmap, page, wait)) {
295 set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
299 bh = page_buffers(page);
301 while (bh && bh->b_blocknr) {
302 atomic_inc(&bitmap->pending_writes);
303 set_buffer_locked(bh);
304 set_buffer_mapped(bh);
305 submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
306 bh = bh->b_this_page;
310 wait_event(bitmap->write_wait,
311 atomic_read(&bitmap->pending_writes)==0);
313 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
314 md_bitmap_file_kick(bitmap);
317 static void end_bitmap_write(struct buffer_head *bh, int uptodate)
319 struct bitmap *bitmap = bh->b_private;
322 set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
323 if (atomic_dec_and_test(&bitmap->pending_writes))
324 wake_up(&bitmap->write_wait);
327 /* copied from buffer.c */
329 __clear_page_buffers(struct page *page)
331 ClearPagePrivate(page);
332 set_page_private(page, 0);
335 static void free_buffers(struct page *page)
337 struct buffer_head *bh;
339 if (!PagePrivate(page))
342 bh = page_buffers(page);
344 struct buffer_head *next = bh->b_this_page;
345 free_buffer_head(bh);
348 __clear_page_buffers(page);
352 /* read a page from a file.
353 * We both read the page, and attach buffers to the page to record the
354 * address of each block (using bmap). These addresses will be used
355 * to write the block later, completely bypassing the filesystem.
356 * This usage is similar to how swap files are handled, and allows us
357 * to write to a file with no concerns of memory allocation failing.
359 static int read_page(struct file *file, unsigned long index,
360 struct bitmap *bitmap,
365 struct inode *inode = file_inode(file);
366 struct buffer_head *bh;
369 pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
370 (unsigned long long)index << PAGE_SHIFT);
372 bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
377 attach_page_buffers(page, bh);
378 block = index << (PAGE_SHIFT - inode->i_blkbits);
383 bh->b_blocknr = bmap(inode, block);
384 if (bh->b_blocknr == 0) {
385 /* Cannot use this file! */
389 bh->b_bdev = inode->i_sb->s_bdev;
390 if (count < (1<<inode->i_blkbits))
393 count -= (1<<inode->i_blkbits);
395 bh->b_end_io = end_bitmap_write;
396 bh->b_private = bitmap;
397 atomic_inc(&bitmap->pending_writes);
398 set_buffer_locked(bh);
399 set_buffer_mapped(bh);
400 submit_bh(REQ_OP_READ, 0, bh);
403 bh = bh->b_this_page;
407 wait_event(bitmap->write_wait,
408 atomic_read(&bitmap->pending_writes)==0);
409 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
413 pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
415 (unsigned long long)index << PAGE_SHIFT,
421 * bitmap file superblock operations
425 * md_bitmap_wait_writes() should be called before writing any bitmap
426 * blocks, to ensure previous writes, particularly from
427 * md_bitmap_daemon_work(), have completed.
429 static void md_bitmap_wait_writes(struct bitmap *bitmap)
431 if (bitmap->storage.file)
432 wait_event(bitmap->write_wait,
433 atomic_read(&bitmap->pending_writes)==0);
435 /* Note that we ignore the return value. The writes
436 * might have failed, but that would just mean that
437 * some bits which should be cleared haven't been,
438 * which is safe. The relevant bitmap blocks will
439 * probably get written again, but there is no great
440 * loss if they aren't.
442 md_super_wait(bitmap->mddev);
446 /* update the event counter and sync the superblock to disk */
447 void md_bitmap_update_sb(struct bitmap *bitmap)
451 if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
453 if (bitmap->mddev->bitmap_info.external)
455 if (!bitmap->storage.sb_page) /* no superblock */
457 sb = kmap_atomic(bitmap->storage.sb_page);
458 sb->events = cpu_to_le64(bitmap->mddev->events);
459 if (bitmap->mddev->events < bitmap->events_cleared)
460 /* rocking back to read-only */
461 bitmap->events_cleared = bitmap->mddev->events;
462 sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
464 * clear BITMAP_WRITE_ERROR bit to protect against the case that
465 * a bitmap write error occurred but the later writes succeeded.
467 sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
468 /* Just in case these have been changed via sysfs: */
469 sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
470 sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
471 /* This might have been changed by a reshape */
472 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
473 sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
474 sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
475 sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
478 write_page(bitmap, bitmap->storage.sb_page, 1);
480 EXPORT_SYMBOL(md_bitmap_update_sb);
482 /* print out the bitmap file superblock */
483 void md_bitmap_print_sb(struct bitmap *bitmap)
487 if (!bitmap || !bitmap->storage.sb_page)
489 sb = kmap_atomic(bitmap->storage.sb_page);
490 pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
491 pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
492 pr_debug(" version: %u\n", le32_to_cpu(sb->version));
493 pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
494 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
495 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
496 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
497 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
498 pr_debug(" events: %llu\n",
499 (unsigned long long) le64_to_cpu(sb->events));
500 pr_debug("events cleared: %llu\n",
501 (unsigned long long) le64_to_cpu(sb->events_cleared));
502 pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
503 pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize));
504 pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
505 pr_debug(" sync size: %llu KB\n",
506 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
507 pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
515 * This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb
516 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
517 * This function verifies 'bitmap_info' and populates the on-disk bitmap
518 * structure, which is to be written to disk.
520 * Returns: 0 on success, -Exxx on error
522 static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
525 unsigned long chunksize, daemon_sleep, write_behind;
527 bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
528 if (bitmap->storage.sb_page == NULL)
530 bitmap->storage.sb_page->index = 0;
532 sb = kmap_atomic(bitmap->storage.sb_page);
534 sb->magic = cpu_to_le32(BITMAP_MAGIC);
535 sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
537 chunksize = bitmap->mddev->bitmap_info.chunksize;
539 if (!is_power_of_2(chunksize)) {
541 pr_warn("bitmap chunksize not a power of 2\n");
544 sb->chunksize = cpu_to_le32(chunksize);
546 daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
547 if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
548 pr_debug("Choosing daemon_sleep default (5 sec)\n");
549 daemon_sleep = 5 * HZ;
551 sb->daemon_sleep = cpu_to_le32(daemon_sleep);
552 bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
555 * FIXME: write_behind for RAID1. If not specified, what
556 * is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
558 write_behind = bitmap->mddev->bitmap_info.max_write_behind;
559 if (write_behind > COUNTER_MAX)
560 write_behind = COUNTER_MAX / 2;
561 sb->write_behind = cpu_to_le32(write_behind);
562 bitmap->mddev->bitmap_info.max_write_behind = write_behind;
564 /* keep the array size field of the bitmap superblock up to date */
565 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
567 memcpy(sb->uuid, bitmap->mddev->uuid, 16);
569 set_bit(BITMAP_STALE, &bitmap->flags);
570 sb->state = cpu_to_le32(bitmap->flags);
571 bitmap->events_cleared = bitmap->mddev->events;
572 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
573 bitmap->mddev->bitmap_info.nodes = 0;
580 /* read the superblock from the bitmap file and initialize some bitmap fields */
581 static int md_bitmap_read_sb(struct bitmap *bitmap)
585 unsigned long chunksize, daemon_sleep, write_behind;
586 unsigned long long events;
588 unsigned long sectors_reserved = 0;
590 struct page *sb_page;
591 loff_t offset = bitmap->mddev->bitmap_info.offset;
593 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
594 chunksize = 128 * 1024 * 1024;
595 daemon_sleep = 5 * HZ;
597 set_bit(BITMAP_STALE, &bitmap->flags);
601 /* page 0 is the superblock, read it... */
602 sb_page = alloc_page(GFP_KERNEL);
605 bitmap->storage.sb_page = sb_page;
608 /* If cluster_slot is set, the cluster is set up */
609 if (bitmap->cluster_slot >= 0) {
610 sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
612 sector_div(bm_blocks,
613 bitmap->mddev->bitmap_info.chunksize >> 9);
615 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
617 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
618 offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
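		/*
		 * Worked example (illustrative numbers): a 1 TiB array
		 * (2^31 sectors) with 64 MiB chunks (2^17 sectors) has 2^14
		 * chunks, so bm_blocks becomes 16384/8 + 256 = 2304 bytes,
		 * which rounds up to one 4K block; each slot's bitmap then
		 * occupies (1 << 3) = 8 sectors, and slot N starts 8 * N
		 * sectors past the base bitmap_info.offset.
		 */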
619 pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
620 bitmap->cluster_slot, offset);
623 if (bitmap->storage.file) {
624 loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
625 int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
627 err = read_page(bitmap->storage.file, 0,
628 bitmap, bytes, sb_page);
630 err = read_sb_page(bitmap->mddev,
633 0, sizeof(bitmap_super_t));
639 sb = kmap_atomic(sb_page);
641 chunksize = le32_to_cpu(sb->chunksize);
642 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
643 write_behind = le32_to_cpu(sb->write_behind);
644 sectors_reserved = le32_to_cpu(sb->sectors_reserved);
646 /* verify that the bitmap-specific fields are valid */
647 if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
648 reason = "bad magic";
649 else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
650 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
651 reason = "unrecognized superblock version";
652 else if (chunksize < 512)
653 reason = "bitmap chunksize too small";
654 else if (!is_power_of_2(chunksize))
655 reason = "bitmap chunksize not a power of 2";
656 else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
657 reason = "daemon sleep period out of range";
658 else if (write_behind > COUNTER_MAX)
659 reason = "write-behind limit out of range (0 - 16383)";
661 pr_warn("%s: invalid bitmap file superblock: %s\n",
662 bmname(bitmap), reason);
667 * Setup nodes/clustername only if bitmap version is
670 if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
671 nodes = le32_to_cpu(sb->nodes);
672 strlcpy(bitmap->mddev->bitmap_info.cluster_name,
673 sb->cluster_name, 64);
676 /* keep the array size field of the bitmap superblock up to date */
677 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
679 if (bitmap->mddev->persistent) {
681 * We have a persistent array superblock, so compare the
682 * bitmap's UUID and event counter to the mddev's
684 if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
685 pr_warn("%s: bitmap superblock UUID mismatch\n",
689 events = le64_to_cpu(sb->events);
690 if (!nodes && (events < bitmap->mddev->events)) {
691 pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
692 bmname(bitmap), events,
693 (unsigned long long) bitmap->mddev->events);
694 set_bit(BITMAP_STALE, &bitmap->flags);
698 /* assign fields using values from superblock */
699 bitmap->flags |= le32_to_cpu(sb->state);
700 if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
701 set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
702 bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
703 strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
708 if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
709 /* Assigning chunksize is required for "re_read" */
710 bitmap->mddev->bitmap_info.chunksize = chunksize;
711 err = md_setup_cluster(bitmap->mddev, nodes);
713 pr_warn("%s: Could not setup cluster service (%d)\n",
714 bmname(bitmap), err);
717 bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
723 if (test_bit(BITMAP_STALE, &bitmap->flags))
724 bitmap->events_cleared = bitmap->mddev->events;
725 bitmap->mddev->bitmap_info.chunksize = chunksize;
726 bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
727 bitmap->mddev->bitmap_info.max_write_behind = write_behind;
728 bitmap->mddev->bitmap_info.nodes = nodes;
729 if (bitmap->mddev->bitmap_info.space == 0 ||
730 bitmap->mddev->bitmap_info.space > sectors_reserved)
731 bitmap->mddev->bitmap_info.space = sectors_reserved;
733 md_bitmap_print_sb(bitmap);
734 if (bitmap->cluster_slot < 0)
735 md_cluster_stop(bitmap->mddev);
741 * general bitmap file operations
747 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
748 * file a page at a time. There's a superblock at the start of the file.
750 /* calculate the index of the page that contains this bit */
751 static inline unsigned long file_page_index(struct bitmap_storage *store,
755 chunk += sizeof(bitmap_super_t) << 3;
756 return chunk >> PAGE_BIT_SHIFT;
759 /* calculate the (bit) offset of this bit within a page */
760 static inline unsigned long file_page_offset(struct bitmap_storage *store,
764 chunk += sizeof(bitmap_super_t) << 3;
765 return chunk & (PAGE_BITS - 1);
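
/*
 * Worked example (assuming 4 KiB pages, so PAGE_BITS == 32768, and an
 * internal superblock, whose sizeof(bitmap_super_t) << 3 == 2048 bits are
 * skipped): chunk 40000 corresponds to bit 42048 of the file, i.e. page
 * index 1 (42048 >> 15) at bit offset 9280 (42048 & 32767) within that page.
 */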
769 * return a pointer to the page in the filemap that contains the given bit
772 static inline struct page *filemap_get_page(struct bitmap_storage *store,
775 if (file_page_index(store, chunk) >= store->file_pages)
777 return store->filemap[file_page_index(store, chunk)];
780 static int md_bitmap_storage_alloc(struct bitmap_storage *store,
781 unsigned long chunks, int with_super,
784 int pnum, offset = 0;
785 unsigned long num_pages;
788 bytes = DIV_ROUND_UP(chunks, 8);
790 bytes += sizeof(bitmap_super_t);
792 num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
793 offset = slot_number * num_pages;
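	/*
	 * Illustrative sizing: a 1 TiB array with 64 MiB chunks has 16384
	 * chunks, so bytes = 2048 (one bit per chunk), plus 256 bytes when
	 * with_super is set, giving 2304 bytes and a single filemap page
	 * (num_pages == 1) per cluster slot.
	 */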
795 store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
800 if (with_super && !store->sb_page) {
801 store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
802 if (store->sb_page == NULL)
807 if (store->sb_page) {
808 store->filemap[0] = store->sb_page;
810 store->sb_page->index = offset;
813 for ( ; pnum < num_pages; pnum++) {
814 store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
815 if (!store->filemap[pnum]) {
816 store->file_pages = pnum;
819 store->filemap[pnum]->index = pnum + offset;
821 store->file_pages = pnum;
823 /* We need 4 bits per page, rounded up to a multiple
824 * of sizeof(unsigned long) */
825 store->filemap_attr = kzalloc(
826 roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
828 if (!store->filemap_attr)
831 store->bytes = bytes;
836 static void md_bitmap_file_unmap(struct bitmap_storage *store)
838 struct page **map, *sb_page;
843 map = store->filemap;
844 pages = store->file_pages;
845 sb_page = store->sb_page;
848 if (map[pages] != sb_page) /* 0 is sb_page, release it below */
849 free_buffers(map[pages]);
851 kfree(store->filemap_attr);
854 free_buffers(sb_page);
857 struct inode *inode = file_inode(file);
858 invalidate_mapping_pages(inode->i_mapping, 0, -1);
864 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
865 * then it is no longer reliable, so we stop using it and we mark the file
866 * as failed in the superblock
868 static void md_bitmap_file_kick(struct bitmap *bitmap)
870 char *path, *ptr = NULL;
872 if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
873 md_bitmap_update_sb(bitmap);
875 if (bitmap->storage.file) {
876 path = kmalloc(PAGE_SIZE, GFP_KERNEL);
878 ptr = file_path(bitmap->storage.file,
881 pr_warn("%s: kicking failed bitmap file %s from array!\n",
882 bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
886 pr_warn("%s: disabling internal bitmap due to errors\n",
891 enum bitmap_page_attr {
892 BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */
893 BITMAP_PAGE_PENDING = 1, /* there are bits that are being cleaned.
894 * i.e. counter is 1 or 2. */
895 BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
898 static inline void set_page_attr(struct bitmap *bitmap, int pnum,
899 enum bitmap_page_attr attr)
901 set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
904 static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
905 enum bitmap_page_attr attr)
907 clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
910 static inline int test_page_attr(struct bitmap *bitmap, int pnum,
911 enum bitmap_page_attr attr)
913 return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
916 static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
917 enum bitmap_page_attr attr)
919 return test_and_clear_bit((pnum<<2) + attr,
920 bitmap->storage.filemap_attr);
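
/*
 * filemap_attr reserves four attribute bits per file page, so the helpers
 * above address bit (pnum << 2) + attr.  For example, marking file page 3
 * BITMAP_PAGE_DIRTY sets bit 12 of the attribute array.
 */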
923 * bitmap_file_set_bit -- called before performing a write to the md device
924 * to set (and eventually sync) a particular bit in the bitmap file
926 * we set the bit immediately, then we record the page number so that
927 * when an unplug occurs, we can flush the dirty pages out to disk
929 static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
934 unsigned long chunk = block >> bitmap->counts.chunkshift;
935 struct bitmap_storage *store = &bitmap->storage;
936 unsigned long node_offset = 0;
938 if (mddev_is_clustered(bitmap->mddev))
939 node_offset = bitmap->cluster_slot * store->file_pages;
941 page = filemap_get_page(&bitmap->storage, chunk);
944 bit = file_page_offset(&bitmap->storage, chunk);
947 kaddr = kmap_atomic(page);
948 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
951 set_bit_le(bit, kaddr);
952 kunmap_atomic(kaddr);
953 pr_debug("set file bit %lu page %lu\n", bit, page->index);
954 /* record page number so it gets flushed to disk when unplug occurs */
955 set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
958 static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
963 unsigned long chunk = block >> bitmap->counts.chunkshift;
964 struct bitmap_storage *store = &bitmap->storage;
965 unsigned long node_offset = 0;
967 if (mddev_is_clustered(bitmap->mddev))
968 node_offset = bitmap->cluster_slot * store->file_pages;
970 page = filemap_get_page(&bitmap->storage, chunk);
973 bit = file_page_offset(&bitmap->storage, chunk);
974 paddr = kmap_atomic(page);
975 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
976 clear_bit(bit, paddr);
978 clear_bit_le(bit, paddr);
979 kunmap_atomic(paddr);
980 if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
981 set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
982 bitmap->allclean = 0;
986 static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
991 unsigned long chunk = block >> bitmap->counts.chunkshift;
994 page = filemap_get_page(&bitmap->storage, chunk);
997 bit = file_page_offset(&bitmap->storage, chunk);
998 paddr = kmap_atomic(page);
999 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1000 set = test_bit(bit, paddr);
1002 set = test_bit_le(bit, paddr);
1003 kunmap_atomic(paddr);
1008 /* this gets called when the md device is ready to unplug its underlying
1009 * (slave) device queues -- before we let any writes go down, we need to
1010 * sync the dirty pages of the bitmap file to disk */
1011 void md_bitmap_unplug(struct bitmap *bitmap)
1014 int dirty, need_write;
1017 if (!bitmap || !bitmap->storage.filemap ||
1018 test_bit(BITMAP_STALE, &bitmap->flags))
1021 /* look at each page to see if there are any set bits that need to be
1022 * flushed out to disk */
1023 for (i = 0; i < bitmap->storage.file_pages; i++) {
1024 if (!bitmap->storage.filemap)
1026 dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
1027 need_write = test_and_clear_page_attr(bitmap, i,
1028 BITMAP_PAGE_NEEDWRITE);
1029 if (dirty || need_write) {
1031 md_bitmap_wait_writes(bitmap);
1032 if (bitmap->mddev->queue)
1033 blk_add_trace_msg(bitmap->mddev->queue,
1034 "md bitmap_unplug");
1036 clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
1037 write_page(bitmap, bitmap->storage.filemap[i], 0);
1042 md_bitmap_wait_writes(bitmap);
1044 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1045 md_bitmap_file_kick(bitmap);
1047 EXPORT_SYMBOL(md_bitmap_unplug);
1049 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
1050 /* bitmap_init_from_disk -- called at bitmap_create time to initialize
1051 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
1052 * memory mapping of the bitmap file
1054 * if there's no bitmap file, or if the bitmap file had been
1055 * previously kicked from the array, we mark all the bits as
1056 * 1's in order to cause a full resync.
1058 * We ignore all bits for sectors that end earlier than 'start'.
1059 * This is used when reading an out-of-date bitmap...
1061 static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1063 unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
1064 struct page *page = NULL;
1065 unsigned long bit_cnt = 0;
1067 unsigned long offset;
1071 struct bitmap_storage *store = &bitmap->storage;
1073 chunks = bitmap->counts.chunks;
1076 if (!file && !bitmap->mddev->bitmap_info.offset) {
1077 /* No permanent bitmap - fill with '1s'. */
1078 store->filemap = NULL;
1079 store->file_pages = 0;
1080 for (i = 0; i < chunks ; i++) {
1081 /* if the disk bit is set, set the memory bit */
1082 int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
1084 md_bitmap_set_memory_bits(bitmap,
1085 (sector_t)i << bitmap->counts.chunkshift,
1091 outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
1093 pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
1095 if (file && i_size_read(file->f_mapping->host) < store->bytes) {
1096 pr_warn("%s: bitmap file too short %lu < %lu\n",
1098 (unsigned long) i_size_read(file->f_mapping->host),
1105 if (!bitmap->mddev->bitmap_info.external)
1106 offset = sizeof(bitmap_super_t);
1108 if (mddev_is_clustered(bitmap->mddev))
1109 node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));
1111 for (i = 0; i < chunks; i++) {
1113 index = file_page_index(&bitmap->storage, i);
1114 bit = file_page_offset(&bitmap->storage, i);
1115 if (index != oldindex) { /* this is a new page, read it in */
1117 /* unmap the old page, we're done with it */
1118 if (index == store->file_pages-1)
1119 count = store->bytes - index * PAGE_SIZE;
1122 page = store->filemap[index];
1124 ret = read_page(file, index, bitmap,
1129 bitmap->mddev->bitmap_info.offset,
1131 index + node_offset, count);
1140 * if bitmap is out of date, dirty the
1141 * whole page and write it out
1143 paddr = kmap_atomic(page);
1144 memset(paddr + offset, 0xff,
1145 PAGE_SIZE - offset);
1146 kunmap_atomic(paddr);
1147 write_page(bitmap, page, 1);
1150 if (test_bit(BITMAP_WRITE_ERROR,
1155 paddr = kmap_atomic(page);
1156 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1157 b = test_bit(bit, paddr);
1159 b = test_bit_le(bit, paddr);
1160 kunmap_atomic(paddr);
1162 /* if the disk bit is set, set the memory bit */
1163 int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
1165 md_bitmap_set_memory_bits(bitmap,
1166 (sector_t)i << bitmap->counts.chunkshift,
1173 pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
1174 bmname(bitmap), store->file_pages,
1180 pr_warn("%s: bitmap initialisation failed: %d\n",
1181 bmname(bitmap), ret);
1185 void md_bitmap_write_all(struct bitmap *bitmap)
1187 /* We don't actually write all bitmap blocks here,
1188 * just flag them as needing to be written
1192 if (!bitmap || !bitmap->storage.filemap)
1194 if (bitmap->storage.file)
1195 /* Only one copy, so nothing needed */
1198 for (i = 0; i < bitmap->storage.file_pages; i++)
1199 set_page_attr(bitmap, i,
1200 BITMAP_PAGE_NEEDWRITE);
1201 bitmap->allclean = 0;
1204 static void md_bitmap_count_page(struct bitmap_counts *bitmap,
1205 sector_t offset, int inc)
1207 sector_t chunk = offset >> bitmap->chunkshift;
1208 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1209 bitmap->bp[page].count += inc;
1210 md_bitmap_checkfree(bitmap, page);
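
/*
 * With 4 KiB pages and 16-bit counters (the sizes defined in md-bitmap.h),
 * PAGE_COUNTER_RATIO is 2048, so each struct bitmap_page covers 2048 chunks
 * and 'page' above is simply chunk >> PAGE_COUNTER_SHIFT (chunk / 2048).
 */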
1213 static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
1215 sector_t chunk = offset >> bitmap->chunkshift;
1216 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1217 struct bitmap_page *bp = &bitmap->bp[page];
1223 static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1224 sector_t offset, sector_t *blocks,
1228 * bitmap daemon -- periodically wakes up to clean bits and flush pages
1232 void md_bitmap_daemon_work(struct mddev *mddev)
1234 struct bitmap *bitmap;
1236 unsigned long nextpage;
1238 struct bitmap_counts *counts;
1240 /* Use a mutex to guard daemon_work against
1243 mutex_lock(&mddev->bitmap_info.mutex);
1244 bitmap = mddev->bitmap;
1245 if (bitmap == NULL) {
1246 mutex_unlock(&mddev->bitmap_info.mutex);
1249 if (time_before(jiffies, bitmap->daemon_lastrun
1250 + mddev->bitmap_info.daemon_sleep))
1253 bitmap->daemon_lastrun = jiffies;
1254 if (bitmap->allclean) {
1255 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1258 bitmap->allclean = 1;
1260 if (bitmap->mddev->queue)
1261 blk_add_trace_msg(bitmap->mddev->queue,
1262 "md bitmap_daemon_work");
1264 /* Any file-page which is PENDING now needs to be written.
1265 * So set NEEDWRITE now, then after we make any last-minute changes
1268 for (j = 0; j < bitmap->storage.file_pages; j++)
1269 if (test_and_clear_page_attr(bitmap, j,
1270 BITMAP_PAGE_PENDING))
1271 set_page_attr(bitmap, j,
1272 BITMAP_PAGE_NEEDWRITE);
1274 if (bitmap->need_sync &&
1275 mddev->bitmap_info.external == 0) {
1276 /* Arrange for superblock update as well as
1279 bitmap->need_sync = 0;
1280 if (bitmap->storage.filemap) {
1281 sb = kmap_atomic(bitmap->storage.sb_page);
1282 sb->events_cleared =
1283 cpu_to_le64(bitmap->events_cleared);
1285 set_page_attr(bitmap, 0,
1286 BITMAP_PAGE_NEEDWRITE);
1289 /* Now look at the bitmap counters and if any are '2' or '1',
1290 * decrement and handle accordingly.
1292 counts = &bitmap->counts;
1293 spin_lock_irq(&counts->lock);
1295 for (j = 0; j < counts->chunks; j++) {
1296 bitmap_counter_t *bmc;
1297 sector_t block = (sector_t)j << counts->chunkshift;
1299 if (j == nextpage) {
1300 nextpage += PAGE_COUNTER_RATIO;
1301 if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
1302 j |= PAGE_COUNTER_MASK;
1305 counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
1308 bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
1310 j |= PAGE_COUNTER_MASK;
1313 if (*bmc == 1 && !bitmap->need_sync) {
1314 /* We can clear the bit */
1316 md_bitmap_count_page(counts, block, -1);
1317 md_bitmap_file_clear_bit(bitmap, block);
1318 } else if (*bmc && *bmc <= 2) {
1320 md_bitmap_set_pending(counts, block);
1321 bitmap->allclean = 0;
1324 spin_unlock_irq(&counts->lock);
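
	/*
	 * Net effect of the loop above: a counter of 2 ("recently written,
	 * nothing in flight") is lowered to 1 on one daemon pass and to 0 on
	 * a later pass, at which point the on-disk bit is cleared.  A dirty
	 * bit therefore survives roughly two daemon_sleep periods after the
	 * last write to its chunk completes.
	 */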
1326 md_bitmap_wait_writes(bitmap);
1327 /* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
1328 * DIRTY pages need to be written by bitmap_unplug so it can wait
1330 * If we find any DIRTY page we stop there and let bitmap_unplug
1331 * handle all the rest. This is important in the case where
1332 * the first block holds the superblock and it has been updated.
1333 * We mustn't write any other blocks before the superblock.
1336 j < bitmap->storage.file_pages
1337 && !test_bit(BITMAP_STALE, &bitmap->flags);
1339 if (test_page_attr(bitmap, j,
1341 /* bitmap_unplug will handle the rest */
1343 if (test_and_clear_page_attr(bitmap, j,
1344 BITMAP_PAGE_NEEDWRITE)) {
1345 write_page(bitmap, bitmap->storage.filemap[j], 0);
1350 if (bitmap->allclean == 0)
1351 mddev->thread->timeout =
1352 mddev->bitmap_info.daemon_sleep;
1353 mutex_unlock(&mddev->bitmap_info.mutex);
1356 static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1357 sector_t offset, sector_t *blocks,
1359 __releases(bitmap->lock)
1360 __acquires(bitmap->lock)
1362 /* If 'create', we might release the lock and reclaim it.
1363 * The lock must have been taken with interrupts enabled.
1364 * If !create, we don't release the lock.
1366 sector_t chunk = offset >> bitmap->chunkshift;
1367 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1368 unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
1372 err = md_bitmap_checkpage(bitmap, page, create, 0);
1374 if (bitmap->bp[page].hijacked ||
1375 bitmap->bp[page].map == NULL)
1376 csize = ((sector_t)1) << (bitmap->chunkshift +
1377 PAGE_COUNTER_SHIFT);
1379 csize = ((sector_t)1) << bitmap->chunkshift;
1380 *blocks = csize - (offset & (csize - 1));
1385 /* now locked ... */
1387 if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1388 /* should we use the first or second counter field
1389 * of the hijacked pointer? */
1390 int hi = (pageoff > PAGE_COUNTER_MASK);
1391 return &((bitmap_counter_t *)
1392 &bitmap->bp[page].map)[hi];
1393 } else /* page is allocated */
1394 return (bitmap_counter_t *)
1395 &(bitmap->bp[page].map[pageoff]);
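
/*
 * Each bitmap_counter_t returned here is 16 bits wide (see md-bitmap.h):
 * the top bit is NEEDED_MASK, the next is RESYNC_MASK, and the low 14 bits
 * hold the counter proper, which md_bitmap_startwrite()/endwrite() raise
 * and lower around a floor of 2 while writes are outstanding.  That is why
 * COUNTER_MAX is 16383, matching the write-behind limit reported earlier.
 */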
1398 int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
1405 atomic_inc(&bitmap->behind_writes);
1406 bw = atomic_read(&bitmap->behind_writes);
1407 if (bw > bitmap->behind_writes_used)
1408 bitmap->behind_writes_used = bw;
1410 pr_debug("inc write-behind count %d/%lu\n",
1411 bw, bitmap->mddev->bitmap_info.max_write_behind);
1416 bitmap_counter_t *bmc;
1418 spin_lock_irq(&bitmap->counts.lock);
1419 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
1421 spin_unlock_irq(&bitmap->counts.lock);
1425 if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1426 DEFINE_WAIT(__wait);
1427 /* note that it is safe to do the prepare_to_wait
1428 * after the test as long as we do it before dropping
1431 prepare_to_wait(&bitmap->overflow_wait, &__wait,
1432 TASK_UNINTERRUPTIBLE);
1433 spin_unlock_irq(&bitmap->counts.lock);
1435 finish_wait(&bitmap->overflow_wait, &__wait);
1441 md_bitmap_file_set_bit(bitmap, offset);
1442 md_bitmap_count_page(&bitmap->counts, offset, 1);
1450 spin_unlock_irq(&bitmap->counts.lock);
1453 if (sectors > blocks)
1460 EXPORT_SYMBOL(md_bitmap_startwrite);
1462 void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
1463 unsigned long sectors, int success, int behind)
1468 if (atomic_dec_and_test(&bitmap->behind_writes))
1469 wake_up(&bitmap->behind_wait);
1470 pr_debug("dec write-behind count %d/%lu\n",
1471 atomic_read(&bitmap->behind_writes),
1472 bitmap->mddev->bitmap_info.max_write_behind);
1477 unsigned long flags;
1478 bitmap_counter_t *bmc;
1480 spin_lock_irqsave(&bitmap->counts.lock, flags);
1481 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
1483 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1487 if (success && !bitmap->mddev->degraded &&
1488 bitmap->events_cleared < bitmap->mddev->events) {
1489 bitmap->events_cleared = bitmap->mddev->events;
1490 bitmap->need_sync = 1;
1491 sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1494 if (!success && !NEEDED(*bmc))
1495 *bmc |= NEEDED_MASK;
1497 if (COUNTER(*bmc) == COUNTER_MAX)
1498 wake_up(&bitmap->overflow_wait);
1502 md_bitmap_set_pending(&bitmap->counts, offset);
1503 bitmap->allclean = 0;
1505 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1507 if (sectors > blocks)
1513 EXPORT_SYMBOL(md_bitmap_endwrite);
1515 static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1518 bitmap_counter_t *bmc;
1520 if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1522 return 1; /* always resync if no bitmap */
1524 spin_lock_irq(&bitmap->counts.lock);
1525 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1531 else if (NEEDED(*bmc)) {
1533 if (!degraded) { /* don't set/clear bits if degraded */
1534 *bmc |= RESYNC_MASK;
1535 *bmc &= ~NEEDED_MASK;
1539 spin_unlock_irq(&bitmap->counts.lock);
1543 int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1546 /* bitmap_start_sync must always report on multiples of whole
1547 * pages, otherwise resync (which is very PAGE_SIZE based) will
1549 * So call __bitmap_start_sync repeatedly (if needed) until
1550 * at least PAGE_SIZE>>9 blocks are covered.
1551 * Return the 'or' of the result.
1557 while (*blocks < (PAGE_SIZE>>9)) {
1558 rv |= __bitmap_start_sync(bitmap, offset,
1559 &blocks1, degraded);
1565 EXPORT_SYMBOL(md_bitmap_start_sync);
1567 void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
1569 bitmap_counter_t *bmc;
1570 unsigned long flags;
1572 if (bitmap == NULL) {
1576 spin_lock_irqsave(&bitmap->counts.lock, flags);
1577 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1582 *bmc &= ~RESYNC_MASK;
1584 if (!NEEDED(*bmc) && aborted)
1585 *bmc |= NEEDED_MASK;
1588 md_bitmap_set_pending(&bitmap->counts, offset);
1589 bitmap->allclean = 0;
1594 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1596 EXPORT_SYMBOL(md_bitmap_end_sync);
1598 void md_bitmap_close_sync(struct bitmap *bitmap)
1600 /* Sync has finished, and any bitmap chunks that weren't synced
1601 * properly have been aborted. It remains to us to clear the
1602 * RESYNC bit wherever it is still on
1604 sector_t sector = 0;
1608 while (sector < bitmap->mddev->resync_max_sectors) {
1609 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1613 EXPORT_SYMBOL(md_bitmap_close_sync);
1615 void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
1623 bitmap->last_end_sync = jiffies;
1626 if (!force && time_before(jiffies, (bitmap->last_end_sync
1627 + bitmap->mddev->bitmap_info.daemon_sleep)))
1629 wait_event(bitmap->mddev->recovery_wait,
1630 atomic_read(&bitmap->mddev->recovery_active) == 0);
1632 bitmap->mddev->curr_resync_completed = sector;
1633 set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
1634 sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
1636 while (s < sector && s < bitmap->mddev->resync_max_sectors) {
1637 md_bitmap_end_sync(bitmap, s, &blocks, 0);
1640 bitmap->last_end_sync = jiffies;
1641 sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
1643 EXPORT_SYMBOL(md_bitmap_cond_end_sync);
1645 void md_bitmap_sync_with_cluster(struct mddev *mddev,
1646 sector_t old_lo, sector_t old_hi,
1647 sector_t new_lo, sector_t new_hi)
1649 struct bitmap *bitmap = mddev->bitmap;
1650 sector_t sector, blocks = 0;
1652 for (sector = old_lo; sector < new_lo; ) {
1653 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1656 WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
1658 for (sector = old_hi; sector < new_hi; ) {
1659 md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1662 WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
1664 EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
1666 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1668 /* For each chunk covered by any of these sectors, set the
1669 * counter to 2 and possibly set resync_needed. They should all
1670 * be 0 at this point
1674 bitmap_counter_t *bmc;
1675 spin_lock_irq(&bitmap->counts.lock);
1676 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1678 spin_unlock_irq(&bitmap->counts.lock);
1683 md_bitmap_count_page(&bitmap->counts, offset, 1);
1684 md_bitmap_set_pending(&bitmap->counts, offset);
1685 bitmap->allclean = 0;
1688 *bmc |= NEEDED_MASK;
1689 spin_unlock_irq(&bitmap->counts.lock);
1692 /* dirty the memory and file bits for bitmap chunks "s" to "e" */
1693 void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1695 unsigned long chunk;
1697 for (chunk = s; chunk <= e; chunk++) {
1698 sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1699 md_bitmap_set_memory_bits(bitmap, sec, 1);
1700 md_bitmap_file_set_bit(bitmap, sec);
1701 if (sec < bitmap->mddev->recovery_cp)
1702 /* We are asserting that the array is dirty,
1703 * so move the recovery_cp address back so
1704 * that it is obvious that it is dirty
1706 bitmap->mddev->recovery_cp = sec;
1711 * flush out any pending updates
1713 void md_bitmap_flush(struct mddev *mddev)
1715 struct bitmap *bitmap = mddev->bitmap;
1718 if (!bitmap) /* there was no bitmap */
1721 /* run the daemon_work three times to ensure everything is flushed
1724 sleep = mddev->bitmap_info.daemon_sleep * 2;
1725 bitmap->daemon_lastrun -= sleep;
1726 md_bitmap_daemon_work(mddev);
1727 bitmap->daemon_lastrun -= sleep;
1728 md_bitmap_daemon_work(mddev);
1729 bitmap->daemon_lastrun -= sleep;
1730 md_bitmap_daemon_work(mddev);
1731 if (mddev->bitmap_info.external)
1732 md_super_wait(mddev);
1733 md_bitmap_update_sb(bitmap);
1737 * free memory that was allocated
1739 void md_bitmap_free(struct bitmap *bitmap)
1741 unsigned long k, pages;
1742 struct bitmap_page *bp;
1744 if (!bitmap) /* there was no bitmap */
1747 if (bitmap->sysfs_can_clear)
1748 sysfs_put(bitmap->sysfs_can_clear);
1750 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
1751 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1752 md_cluster_stop(bitmap->mddev);
1754 /* Shouldn't be needed - but just in case.... */
1755 wait_event(bitmap->write_wait,
1756 atomic_read(&bitmap->pending_writes) == 0);
1758 /* release the bitmap file */
1759 md_bitmap_file_unmap(&bitmap->storage);
1761 bp = bitmap->counts.bp;
1762 pages = bitmap->counts.pages;
1764 /* free all allocated memory */
1766 if (bp) /* deallocate the page memory */
1767 for (k = 0; k < pages; k++)
1768 if (bp[k].map && !bp[k].hijacked)
1773 EXPORT_SYMBOL(md_bitmap_free);
1775 void md_bitmap_wait_behind_writes(struct mddev *mddev)
1777 struct bitmap *bitmap = mddev->bitmap;
1779 /* wait for behind writes to complete */
1780 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
1781 pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
1783 /* need to kick something here to make sure I/O goes? */
1784 wait_event(bitmap->behind_wait,
1785 atomic_read(&bitmap->behind_writes) == 0);
1789 void md_bitmap_destroy(struct mddev *mddev)
1791 struct bitmap *bitmap = mddev->bitmap;
1793 if (!bitmap) /* there was no bitmap */
1796 md_bitmap_wait_behind_writes(mddev);
1797 mempool_destroy(mddev->wb_info_pool);
1798 mddev->wb_info_pool = NULL;
1800 mutex_lock(&mddev->bitmap_info.mutex);
1801 spin_lock(&mddev->lock);
1802 mddev->bitmap = NULL; /* disconnect from the md device */
1803 spin_unlock(&mddev->lock);
1804 mutex_unlock(&mddev->bitmap_info.mutex);
1806 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1808 md_bitmap_free(bitmap);
1812 * initialize the bitmap structure
1813 * if this returns an error, bitmap_destroy must be called to do clean up
1814 * once mddev->bitmap is set
1816 struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
1818 struct bitmap *bitmap;
1819 sector_t blocks = mddev->resync_max_sectors;
1820 struct file *file = mddev->bitmap_info.file;
1822 struct kernfs_node *bm = NULL;
1824 BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
1826 BUG_ON(file && mddev->bitmap_info.offset);
1828 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1829 pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
1831 return ERR_PTR(-EBUSY);
1834 bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
1836 return ERR_PTR(-ENOMEM);
1838 spin_lock_init(&bitmap->counts.lock);
1839 atomic_set(&bitmap->pending_writes, 0);
1840 init_waitqueue_head(&bitmap->write_wait);
1841 init_waitqueue_head(&bitmap->overflow_wait);
1842 init_waitqueue_head(&bitmap->behind_wait);
1844 bitmap->mddev = mddev;
1845 bitmap->cluster_slot = slot;
1848 bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
1850 bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
1853 bitmap->sysfs_can_clear = NULL;
1855 bitmap->storage.file = file;
1858 /* As future accesses to this file will use bmap,
1859 * and bypass the page cache, we must sync the file
1864 /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
1865 if (!mddev->bitmap_info.external) {
1867 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
1868 * instructing us to create a new on-disk bitmap instance.
1870 if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
1871 err = md_bitmap_new_disk_sb(bitmap);
1873 err = md_bitmap_read_sb(bitmap);
1876 if (mddev->bitmap_info.chunksize == 0 ||
1877 mddev->bitmap_info.daemon_sleep == 0)
1878 /* chunksize and time_base need to be
1885 bitmap->daemon_lastrun = jiffies;
1886 err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
1890 pr_debug("created bitmap (%lu pages) for device %s\n",
1891 bitmap->counts.pages, bmname(bitmap));
1893 err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
1899 md_bitmap_free(bitmap);
1900 return ERR_PTR(err);
1903 int md_bitmap_load(struct mddev *mddev)
1907 sector_t sector = 0;
1908 struct bitmap *bitmap = mddev->bitmap;
1909 struct md_rdev *rdev;
1914 rdev_for_each(rdev, mddev)
1915 mddev_create_wb_pool(mddev, rdev, true);
1917 if (mddev_is_clustered(mddev))
1918 md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
1920 /* Clear out old bitmap info first: Either there is none, or we
1921 * are resuming after someone else has possibly changed things,
1922 * so we should forget old cached info.
1923 * All chunks should be clean, but some might need_sync.
1925 while (sector < mddev->resync_max_sectors) {
1927 md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1930 md_bitmap_close_sync(bitmap);
1932 if (mddev->degraded == 0
1933 || bitmap->events_cleared == mddev->events)
1934 /* no need to keep dirty bits to optimise a
1935 * re-add of a missing device */
1936 start = mddev->recovery_cp;
1938 mutex_lock(&mddev->bitmap_info.mutex);
1939 err = md_bitmap_init_from_disk(bitmap, start);
1940 mutex_unlock(&mddev->bitmap_info.mutex);
1944 clear_bit(BITMAP_STALE, &bitmap->flags);
1946 /* Kick recovery in case any bits were set */
1947 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
1949 mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
1950 md_wakeup_thread(mddev->thread);
1952 md_bitmap_update_sb(bitmap);
1954 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1959 EXPORT_SYMBOL_GPL(md_bitmap_load);
1961 /* caller needs to free the returned bitmap with md_bitmap_free() */
1962 struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
1965 struct bitmap *bitmap;
1967 bitmap = md_bitmap_create(mddev, slot);
1968 if (IS_ERR(bitmap)) {
1969 rv = PTR_ERR(bitmap);
1973 rv = md_bitmap_init_from_disk(bitmap, 0);
1975 md_bitmap_free(bitmap);
1981 EXPORT_SYMBOL(get_bitmap_from_slot);
1983 /* Loads the bitmap associated with slot and copies the resync information
1986 int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
1987 sector_t *low, sector_t *high, bool clear_bits)
1990 sector_t block, lo = 0, hi = 0;
1991 struct bitmap_counts *counts;
1992 struct bitmap *bitmap;
1994 bitmap = get_bitmap_from_slot(mddev, slot);
1995 if (IS_ERR(bitmap)) {
1996 pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
2000 counts = &bitmap->counts;
2001 for (j = 0; j < counts->chunks; j++) {
2002 block = (sector_t)j << counts->chunkshift;
2003 if (md_bitmap_file_test_bit(bitmap, block)) {
2007 md_bitmap_file_clear_bit(bitmap, block);
2008 md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
2009 md_bitmap_file_set_bit(mddev->bitmap, block);
2014 md_bitmap_update_sb(bitmap);
2015 /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
2016 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
2017 for (i = 0; i < bitmap->storage.file_pages; i++)
2018 if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
2019 set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
2020 md_bitmap_unplug(bitmap);
2022 md_bitmap_unplug(mddev->bitmap);
2025 md_bitmap_free(bitmap);
2029 EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
2032 void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
2034 unsigned long chunk_kb;
2035 struct bitmap_counts *counts;
2040 counts = &bitmap->counts;
2042 chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
2043 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
2045 counts->pages - counts->missing_pages,
2047 (counts->pages - counts->missing_pages)
2048 << (PAGE_SHIFT - 10),
2049 chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
2050 chunk_kb ? "KB" : "B");
2051 if (bitmap->storage.file) {
2052 seq_printf(seq, ", file: ");
2053 seq_file_path(seq, bitmap->storage.file, " \t\n");
2056 seq_printf(seq, "\n");
2059 int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2060 int chunksize, int init)
2062 /* If chunk_size is 0, choose an appropriate chunk size.
2063 * Then possibly allocate new storage space.
2064 * Then quiesce, copy bits, replace bitmap, and re-start
2066 * This function is called both to set up the initial bitmap
2067 * and to resize the bitmap while the array is active.
2068 * If this happens as a result of the array being resized,
2069 * chunksize will be zero, and we need to choose a suitable
2070 * chunksize, otherwise we use what we are given.
2072 struct bitmap_storage store;
2073 struct bitmap_counts old_counts;
2074 unsigned long chunks;
2076 sector_t old_blocks, new_blocks;
2080 struct bitmap_page *new_bp;
2082 if (bitmap->storage.file && !init) {
2083 pr_info("md: cannot resize file-based bitmap\n");
2087 if (chunksize == 0) {
2088 /* If there is enough space, leave the chunk size unchanged,
2089 * else increase by a factor of two until there is enough space.
2092 long space = bitmap->mddev->bitmap_info.space;
2095 /* We don't know how much space there is, so limit
2096 * to current size - in sectors.
2098 bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
2099 if (!bitmap->mddev->bitmap_info.external)
2100 bytes += sizeof(bitmap_super_t);
2101 space = DIV_ROUND_UP(bytes, 512);
2102 bitmap->mddev->bitmap_info.space = space;
2104 chunkshift = bitmap->counts.chunkshift;
2107 /* 'chunkshift' is the shift from block size to chunk size */
2109 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2110 bytes = DIV_ROUND_UP(chunks, 8);
2111 if (!bitmap->mddev->bitmap_info.external)
2112 bytes += sizeof(bitmap_super_t);
2113 } while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
2114 (BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
2116 chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
2118 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
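	/*
	 * Example: a caller-supplied chunksize of 64 MiB (1 << 26 bytes)
	 * gives ffz(~chunksize) == 26, so chunkshift == 26 -
	 * BITMAP_BLOCK_SHIFT == 17 (with the 512-byte block shift of 9 from
	 * md-bitmap.h), i.e. each chunk covers 1 << 17 sectors.
	 */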
2119 memset(&store, 0, sizeof(store));
2120 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
2121 ret = md_bitmap_storage_alloc(&store, chunks,
2122 !bitmap->mddev->bitmap_info.external,
2123 mddev_is_clustered(bitmap->mddev)
2124 ? bitmap->cluster_slot : 0);
2126 md_bitmap_file_unmap(&store);
2130 pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
2132 new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
2135 md_bitmap_file_unmap(&store);
2140 bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
2142 store.file = bitmap->storage.file;
2143 bitmap->storage.file = NULL;
2145 if (store.sb_page && bitmap->storage.sb_page)
2146 memcpy(page_address(store.sb_page),
2147 page_address(bitmap->storage.sb_page),
2148 sizeof(bitmap_super_t));
2149 spin_lock_irq(&bitmap->counts.lock);
2150 md_bitmap_file_unmap(&bitmap->storage);
2151 bitmap->storage = store;
2153 old_counts = bitmap->counts;
2154 bitmap->counts.bp = new_bp;
2155 bitmap->counts.pages = pages;
2156 bitmap->counts.missing_pages = pages;
2157 bitmap->counts.chunkshift = chunkshift;
2158 bitmap->counts.chunks = chunks;
2159 bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
2160 BITMAP_BLOCK_SHIFT);
2162 blocks = min(old_counts.chunks << old_counts.chunkshift,
2163 chunks << chunkshift);
2165 /* For cluster raid, need to pre-allocate bitmap */
2166 if (mddev_is_clustered(bitmap->mddev)) {
2168 for (page = 0; page < pages; page++) {
2169 ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
2173 /* deallocate the page memory */
2174 for (k = 0; k < page; k++) {
2175 kfree(new_bp[k].map);
2179 /* restore some fields from old_counts */
2180 bitmap->counts.bp = old_counts.bp;
2181 bitmap->counts.pages = old_counts.pages;
2182 bitmap->counts.missing_pages = old_counts.pages;
2183 bitmap->counts.chunkshift = old_counts.chunkshift;
2184 bitmap->counts.chunks = old_counts.chunks;
2185 bitmap->mddev->bitmap_info.chunksize =
2186 1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
2187 blocks = old_counts.chunks << old_counts.chunkshift;
2188 pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
2191 bitmap->counts.bp[page].count += 1;
2195 for (block = 0; block < blocks; ) {
2196 bitmap_counter_t *bmc_old, *bmc_new;
2199 bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
2200 set = bmc_old && NEEDED(*bmc_old);
2203 bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2205 if (*bmc_new == 0) {
2206 /* need to set on-disk bits too. */
2207 sector_t end = block + new_blocks;
2208 sector_t start = block >> chunkshift;
2210 start <<= chunkshift;
2211 while (start < end) {
2212 md_bitmap_file_set_bit(bitmap, block);
2213 start += 1 << chunkshift;
2216 md_bitmap_count_page(&bitmap->counts, block, 1);
2217 md_bitmap_set_pending(&bitmap->counts, block);
2219 *bmc_new |= NEEDED_MASK;
2221 if (new_blocks < old_blocks)
2222 old_blocks = new_blocks;
2224 block += old_blocks;
2227 if (bitmap->counts.bp != old_counts.bp) {
2229 for (k = 0; k < old_counts.pages; k++)
2230 if (!old_counts.bp[k].hijacked)
2231 kfree(old_counts.bp[k].map);
2232 kfree(old_counts.bp);
	if (!init) {
		int i;
		while (block < (chunks << chunkshift)) {
			bitmap_counter_t *bmc;
			bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
			if (bmc) {
				/* new space. It needs to be resynced, so
				 * we set NEEDED_MASK.
				 */
				if (*bmc == 0) {
					*bmc = NEEDED_MASK | 2;
					md_bitmap_count_page(&bitmap->counts, block, 1);
					md_bitmap_set_pending(&bitmap->counts, block);
				}
			}
			block += new_blocks;
		}
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
	}
	spin_unlock_irq(&bitmap->counts.lock);

	if (!init) {
		md_bitmap_unplug(bitmap);
		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
	}
	ret = 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(md_bitmap_resize);
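
/* 'bitmap/location' reports where the bitmap metadata lives: "file", a signed
 * sector offset from the superblock, or "none". Writing "none" removes any
 * existing bitmap; writing an offset creates an internal bitmap there.
 * File-backed bitmaps cannot be configured through this interface.
 */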
static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
	int rv;

	rv = mddev_lock(mddev);
	if (rv)
		return rv;
	if (mddev->pers) {
		if (!mddev->pers->quiesce) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->recovery || mddev->sync_thread) {
			rv = -EBUSY;
			goto out;
		}
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured. Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->pers) {
			mddev_suspend(mddev);
			md_bitmap_destroy(mddev);
			mddev_resume(mddev);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			rv = -EINVAL;
			goto out;
		} else {
			if (buf[0] == '+')
				rv = kstrtoll(buf+1, 10, &offset);
			else
				rv = kstrtoll(buf, 10, &offset);
			if (rv)
				goto out;
			if (offset == 0) {
				rv = -EINVAL;
				goto out;
			}
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset) {
				rv = -EINVAL;
				goto out;
			}
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
				bitmap = md_bitmap_create(mddev, -1);
				mddev_suspend(mddev);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
					mddev->bitmap = bitmap;
					rv = md_bitmap_load(mddev);
					if (rv)
						mddev->bitmap_info.offset = 0;
				}
				if (rv) {
					md_bitmap_destroy(mddev);
					mddev_resume(mddev);
					goto out;
				}
				mddev_resume(mddev);
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
	}
	rv = 0;
out:
	mddev_unlock(mddev);
	if (rv)
		return rv;
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);

/* 'bitmap/space' is the space available at 'location' for the
 * bitmap. This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;
	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
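
/* 'bitmap/time_base' is the delay between bitmap daemon wakeups, i.e. how
 * long dirty in-memory bits may age before they are flushed and cleared.
 * It is shown and set in seconds, with an optional decimal fraction.
 */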
static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
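
/* 'bitmap/backlog' is the maximum number of outstanding write-behind writes
 * allowed for write-mostly devices; 0 disables write-behind. Changing it
 * allocates or frees the pool used to track those writes.
 */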
static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	if (!backlog && mddev->wb_info_pool) {
		/* wb_info_pool is not needed if backlog is zero */
		mempool_destroy(mddev->wb_info_pool);
		mddev->wb_info_pool = NULL;
	} else if (backlog && !mddev->wb_info_pool) {
		/* wb_info_pool is needed since backlog is not zero */
		struct md_rdev *rdev;

		rdev_for_each(rdev, mddev)
			mddev_create_wb_pool(mddev, rdev, false);
	}
	if (old_mwb != backlog)
		md_bitmap_update_sb(mddev->bitmap);
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
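
/* 'bitmap/chunksize' is the amount of array data, in bytes, covered by one
 * bitmap bit. It must be a power of two and can only be changed while no
 * bitmap is active.
 */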
static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = kstrtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
		sizeof(((bitmap_super_t *)0)->chunksize))))
		return -EOVERFLOW;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
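
/* 'bitmap/metadata' reports whether the bitmap superblock is managed by the
 * kernel ("internal"), by userspace ("external"), or shared across a cluster
 * ("clustered"). It can only be set before a bitmap is created.
 */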
static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if ((strncmp(buf, "internal", 8) == 0) ||
		 (strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
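
/* 'bitmap/can_clear' controls whether bits may be cleared once the
 * corresponding region is known to be in sync. Writing "false" forces bits
 * to stay set; writing "true" re-enables clearing (refused while degraded).
 */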
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	spin_unlock(&mddev->lock);
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
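
/* 'bitmap/max_backlog_used' reports the peak number of write-behind requests
 * that have been in flight at once; writing any value resets the counter.
 */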
static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	ssize_t ret;
	spin_lock(&mddev->lock);
	if (mddev->bitmap == NULL)
		ret = sprintf(page, "0\n");
	else
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);

static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};