/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>

#include "zram_drv.h"
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo-rle";
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to sizes equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;
static const struct block_device_operations zram_devops;
static const struct block_device_operations zram_wb_devops;

static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio);
static int zram_slot_trylock(struct zram *zram, u32 index)
{
	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
					u32 index, size_t size)
{
	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;

	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline bool zram_allocated(struct zram *zram, u32 index)
{
	return zram_get_obj_size(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME) ||
			zram_test_flag(zram, index, ZRAM_WB);
}
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index  += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
static inline void zram_fill_page(void *ptr, unsigned long len,
					unsigned long value)
{
	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
	memset_l(ptr, value, len / sizeof(unsigned long));
}
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return false;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
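
/*
 * Illustrative usage from user space (not part of this file; "zram0" is
 * an example device name). Writing a size caps the memory the compressed
 * pool may use; writing 0 disables the limit:
 *
 *	echo 1G > /sys/block/zram0/mem_limit
 *	echo 0 > /sys/block/zram0/mem_limit
 */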
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
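
/*
 * Illustrative usage (assuming an initialized zram0): writing 0 is the
 * only accepted input; it resets the max_used_pages watermark to the
 * current number of pages used by the pool:
 *
 *	echo 0 > /sys/block/zram0/mem_used_max
 */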
static ssize_t idle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	int index;

	if (!sysfs_streq(buf, "all"))
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	for (index = 0; index < nr_pages; index++) {
		/*
		 * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
		 * See the comment in writeback_store.
		 */
		zram_slot_lock(zram, index);
		if (zram_allocated(zram, index) &&
				!zram_test_flag(zram, index, ZRAM_UNDER_WB))
			zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
	}

	up_read(&zram->init_lock);

	return len;
}
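
/*
 * Illustrative usage (zram0 is an example): this version only accepts
 * "all", which marks every allocated, not-under-writeback slot idle:
 *
 *	echo all > /sys/block/zram0/idle
 */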
#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->wb_limit_enable = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	bool val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->wb_limit_enable;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t writeback_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->bd_wb_limit = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->bd_wb_limit;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
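
/*
 * Illustrative usage (zram0 is an example). The limit is accounted in
 * units of 4KiB pages (see the bd_wb_limit decrement in writeback_store),
 * so a 400MiB writeback budget is 400 * 1024 / 4 = 102400:
 *
 *	echo 1 > /sys/block/zram0/writeback_limit_enable
 *	echo 102400 > /sys/block/zram0/writeback_limit
 */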
static void reset_bdev(struct zram *zram)
{
	struct block_device *bdev;

	if (!zram->backing_dev)
		return;

	bdev = zram->bdev;
	if (zram->old_block_size)
		set_blocksize(bdev, zram->old_block_size);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	/* hope filp_close flushes all of the IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->old_block_size = 0;
	zram->bdev = NULL;
	zram->disk->fops = &zram_devops;
	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}
static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct file *file;
	struct zram *zram = dev_to_zram(dev);
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	file = zram->backing_dev;
	if (!file) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}
static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	size_t sz;
	struct file *backing_dev = NULL;
	struct inode *inode;
	struct address_space *mapping;
	unsigned int bitmap_sz, old_block_size = 0;
	unsigned long nr_pages, *bitmap = NULL;
	struct block_device *bdev = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strlcpy(file_name, buf, PATH_MAX);
	/* ignore trailing newline */
	sz = strlen(file_name);
	if (sz > 0 && file_name[sz - 1] == '\n')
		file_name[sz - 1] = 0x00;

	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	mapping = backing_dev->f_mapping;
	inode = mapping->host;

	/* Only block devices are supported at the moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	bdev = blkdev_get_by_dev(inode->i_rdev,
			FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
	if (IS_ERR(bdev)) {
		err = PTR_ERR(bdev);
		bdev = NULL;
		goto out;
	}

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	old_block_size = block_size(bdev);
	err = set_blocksize(bdev, PAGE_SIZE);
	if (err)
		goto out;

	reset_bdev(zram);

	zram->old_block_size = old_block_size;
	zram->bdev = bdev;
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	/*
	 * With the writeback feature, zram does asynchronous IO, so it is no
	 * longer a synchronous device: remove the synchronous io flag.
	 * Otherwise, the upper layer (e.g., swap) could wait for IO
	 * completion rather than submit-and-return, which makes the system
	 * sluggish. Furthermore, when the IO function returns (e.g., from
	 * swap_readpage), the upper layer expects the IO to be done and may
	 * free the page, while the IO is in fact still in flight; that would
	 * become a use-after-free once the IO really completes.
	 */
	zram->disk->fops = &zram_wb_devops;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);

	return len;
out:
	kvfree(bitmap);

	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	if (backing_dev)
		filp_close(backing_dev, NULL);

	up_write(&zram->init_lock);

	kfree(file_name);

	return err;
}
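
/*
 * Illustrative usage (device names are examples): the backing device must
 * be a block device and has to be configured before disksize is set:
 *
 *	echo /dev/sda5 > /sys/block/zram0/backing_dev
 *	echo 1G > /sys/block/zram0/disksize
 */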
static unsigned long alloc_block_bdev(struct zram *zram)
{
	unsigned long blk_idx = 1;
retry:
	/* skip bit 0, so a block index is never confused with handle == 0 */
	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
	if (blk_idx == zram->nr_pages)
		return 0;

	if (test_and_set_bit(blk_idx, zram->bitmap))
		goto retry;

	atomic64_inc(&zram->stats.bd_count);
	return blk_idx;
}
static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
	int was_set;

	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
	WARN_ON_ONCE(!was_set);
	atomic64_dec(&zram->stats.bd_count);
}

static void zram_page_end_io(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	page_endio(page, op_is_write(bio_op(bio)),
			blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}
/*
 * Returns 1 if the submission is successful.
 */
static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent)
{
	struct bio *bio;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio_set_dev(bio, zram->bdev);
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
		bio_put(bio);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_READ;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	return 1;
}
#define HUGE_WRITEBACK 1
#define IDLE_WRITEBACK 2
static ssize_t writeback_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	unsigned long index;
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	ssize_t ret = len;
	int mode, err;
	unsigned long blk_idx = 0;

	if (sysfs_streq(buf, "idle"))
		mode = IDLE_WRITEBACK;
	else if (sysfs_streq(buf, "huge"))
		mode = HUGE_WRITEBACK;
	else
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		ret = -EINVAL;
		goto release_init_lock;
	}

	if (!zram->backing_dev) {
		ret = -ENODEV;
		goto release_init_lock;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto release_init_lock;
	}

	for (index = 0; index < nr_pages; index++) {
		struct bio_vec bvec;

		bvec.bv_page = page;
		bvec.bv_len = PAGE_SIZE;
		bvec.bv_offset = 0;

		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
			spin_unlock(&zram->wb_limit_lock);
			ret = -EIO;
			break;
		}
		spin_unlock(&zram->wb_limit_lock);

		if (!blk_idx) {
			blk_idx = alloc_block_bdev(zram);
			if (!blk_idx) {
				ret = -ENOSPC;
				break;
			}
		}

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		if (zram_test_flag(zram, index, ZRAM_WB) ||
				zram_test_flag(zram, index, ZRAM_SAME) ||
				zram_test_flag(zram, index, ZRAM_UNDER_WB))
			goto next;

		if (mode == IDLE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;
		if (mode == HUGE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;
		/*
		 * Clearing ZRAM_UNDER_WB is the caller's duty;
		 * IOW, zram_free_page() never clears it.
		 */
		zram_set_flag(zram, index, ZRAM_UNDER_WB);
		/* Needed to handle racing hugepage writeback */
		zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
		if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		bio_init(&bio, &bio_vec, 1);
		bio_set_dev(&bio, zram->bdev);
		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
		bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;

		bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset);
		/*
		 * XXX: A single page IO would be inefficient for write
		 * but it would be not bad as starter.
		 */
		err = submit_bio_wait(&bio);
		if (err) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			/*
			 * Remember the last IO error; it is returned
			 * even if later IOs succeed.
			 */
			ret = err;
			continue;
		}

		atomic64_inc(&zram->stats.bd_writes);
		/*
		 * We released zram_slot_lock so need to check if the slot was
		 * changed. If there is freeing for the slot, we can catch it
		 * easily by zram_allocated.
		 * A subtle case is the slot is freed/reallocated/marked as
		 * ZRAM_IDLE again. To close the race, idle_store doesn't
		 * mark ZRAM_IDLE once it found the slot was ZRAM_UNDER_WB.
		 * Thus, we could close the race by checking ZRAM_IDLE bit.
		 */
		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index) ||
			  !zram_test_flag(zram, index, ZRAM_IDLE)) {
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			goto next;
		}

		zram_free_page(zram, index);
		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
		zram_set_flag(zram, index, ZRAM_WB);
		zram_set_element(zram, index, blk_idx);
		blk_idx = 0;
		atomic64_inc(&zram->stats.pages_stored);
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
		spin_unlock(&zram->wb_limit_lock);
next:
		zram_slot_unlock(zram, index);
	}

	if (blk_idx)
		free_block_bdev(zram, blk_idx);
	__free_page(page);
release_init_lock:
	up_read(&zram->init_lock);

	return ret;
}
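
/*
 * Illustrative usage (zram0 is an example): write back all idle pages, or
 * all incompressible (huge) pages, to the configured backing device.
 * Pages are typically marked idle first via the idle attribute:
 *
 *	echo all > /sys/block/zram0/idle
 *	echo idle > /sys/block/zram0/writeback
 *	echo huge > /sys/block/zram0/writeback
 */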
struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct bio *bio;
	struct bio_vec bvec;
};

#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram *zram = zw->zram;
	unsigned long entry = zw->entry;
	struct bio *bio = zw->bio;

	read_from_bdev_async(zram, &zw->bvec, entry, bio);
}
/*
 * The block layer wants one ->submit_bio to be active at a time, so if we
 * use chained IO with parent IO in the same context, it's a deadlock. To
 * avoid that, use a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	struct zram_work work;

	work.bvec = *bvec;
	work.zram = zram;
	work.entry = entry;
	work.bio = bio;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return 1;
}
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	WARN_ON(1);
	return -EIO;
}
#endif

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	atomic64_inc(&zram->stats.bd_reads);
	if (sync)
		return read_from_bdev_sync(zram, bvec, entry, parent);
	else
		return read_from_bdev_async(zram, bvec, entry, parent);
}
#else
static inline void reset_bdev(struct zram *zram) {};
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	return -EIO;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{
	zram_debugfs_root = debugfs_create_dir("zram", NULL);
}

static void zram_debugfs_destroy(void)
{
	debugfs_remove_recursive(zram_debugfs_root);
}

static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
	zram->table[index].ac_time = ktime_get_boottime();
}
static ssize_t read_block_state(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t index, written = 0;
	struct zram *zram = file->private_data;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	struct timespec64 ts;

	kbuf = kvmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		kvfree(kbuf);
		return -EINVAL;
	}

	for (index = *ppos; index < nr_pages; index++) {
		int copied;

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		ts = ktime_to_timespec64(zram->table[index].ac_time);
		copied = snprintf(kbuf + written, count,
			"%12zd %12lld.%06lu %c%c%c%c\n",
			index, (s64)ts.tv_sec,
			ts.tv_nsec / NSEC_PER_USEC,
			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');

		if (count <= copied) {
			zram_slot_unlock(zram, index);
			break;
		}
		written += copied;
		count -= copied;
next:
		zram_slot_unlock(zram, index);
		*ppos += 1;
	}

	up_read(&zram->init_lock);
	if (copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	kvfree(kbuf);

	return written;
}
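
/*
 * Illustrative output of <debugfs>/zram/zram0/block_state (the values are
 * made up; the columns follow the format string above: slot index, access
 * time as seconds.microseconds, then the s/w/h/i flag characters):
 *
 *	   300    75.033841 .wh.
 *	   301    63.806904 s...
 *	   302    63.806919 ..hi
 */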
static const struct file_operations proc_zram_block_state_op = {
	.open = simple_open,
	.read = read_block_state,
	.llseek = default_llseek,
};

static void zram_debugfs_register(struct zram *zram)
{
	if (!zram_debugfs_root)
		return;

	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
						zram_debugfs_root);
	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
				zram, &proc_zram_block_state_op);
}

static void zram_debugfs_unregister(struct zram *zram)
{
	debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}
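
/*
 * Illustrative usage (zram0 and the algorithm list are examples; the set
 * actually shown depends on the kernel config). Reading lists the known
 * algorithms with the selected one in brackets; writing selects one and
 * must happen before disksize is set:
 *
 *	cat /sys/block/zram0/comp_algorithm
 *	lzo [lzo-rle] lz4 lz4hc zstd
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 */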
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}
static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			atomic_long_read(&pool_stats.pages_compacted),
			(u64)atomic64_read(&zram->stats.huge_pages));
	up_read(&zram->init_lock);

	return ret;
}
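
/*
 * The mm_stat columns printed above, in order:
 * orig_data_size compr_data_size mem_used_total mem_limit mem_used_max
 * same_pages pages_compacted huge_pages
 */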
#ifdef CONFIG_ZRAM_WRITEBACK
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
		"%8llu %8llu %8llu\n",
			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
	up_read(&zram->init_lock);

	return ret;
}
#endif
static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu %8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall),
			(u64)atomic64_read(&zram->stats.miss_free));
	up_read(&zram->init_lock);

	return ret;
}
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);
static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	if (!huge_class_size)
		huge_class_size = zs_huge_class_size(zram->mem_pool);
	return true;
}
/*
 * To protect concurrent access to the same index entry, the caller should
 * hold this table index entry's bit_spinlock to indicate that the entry is
 * being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle;

#ifdef CONFIG_ZRAM_MEMORY_TRACKING
	zram->table[index].ac_time = 0;
#endif
	if (zram_test_flag(zram, index, ZRAM_IDLE))
		zram_clear_flag(zram, index, ZRAM_IDLE);

	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
		zram_clear_flag(zram, index, ZRAM_HUGE);
		atomic64_dec(&zram->stats.huge_pages);
	}

	if (zram_test_flag(zram, index, ZRAM_WB)) {
		zram_clear_flag(zram, index, ZRAM_WB);
		free_block_bdev(zram, zram_get_element(zram, index));
		goto out;
	}

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		atomic64_dec(&zram->stats.same_pages);
		goto out;
	}

	handle = zram_get_handle(zram, index);
	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
			&zram->stats.compr_data_size);
out:
	atomic64_dec(&zram->stats.pages_stored);
	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
	WARN_ON_ONCE(zram->table[index].flags &
		~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
				struct bio *bio, bool partial_io)
{
	struct zcomp_strm *zstrm;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;
	int ret;

	zram_slot_lock(zram, index);
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		struct bio_vec bvec;

		zram_slot_unlock(zram, index);

		bvec.bv_page = page;
		bvec.bv_len = PAGE_SIZE;
		bvec.bv_offset = 0;
		return read_from_bdev(zram, &bvec,
				zram_get_element(zram, index),
				bio, partial_io);
	}

	handle = zram_get_handle(zram, index);
	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
		unsigned long value;
		void *mem;

		value = handle ? zram_get_element(zram, index) : 0;
		mem = kmap_atomic(page);
		zram_fill_page(mem, PAGE_SIZE, value);
		kunmap_atomic(mem);
		zram_slot_unlock(zram, index);
		return 0;
	}

	size = zram_get_obj_size(zram, index);

	if (size != PAGE_SIZE)
		zstrm = zcomp_stream_get(zram->comp);

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(zram->mem_pool, handle);
	zram_slot_unlock(zram, index);

	/* Should NEVER happen. Return bio error if it does. */
	if (WARN_ON(ret))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
	if (unlikely(ret))
		goto out;

	if (is_partial_io(bvec)) {
		void *dst = kmap_atomic(bvec->bv_page);
		void *src = kmap_atomic(page);

		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
		kunmap_atomic(src);
		kunmap_atomic(dst);
	}
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}
static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, struct bio *bio)
{
	int ret = 0;
	unsigned long alloced_pages;
	unsigned long handle = 0;
	unsigned int comp_len = 0;
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
	struct page *page = bvec->bv_page;
	unsigned long element = 0;
	enum zram_pageflags flags = 0;

	mem = kmap_atomic(page);
	if (page_same_filled(mem, &element)) {
		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		flags = ZRAM_SAME;
		atomic64_inc(&zram->stats.same_pages);
		goto out;
	}
	kunmap_atomic(mem);

compress_again:
	zstrm = zcomp_stream_get(zram->comp);
	src = kmap_atomic(page);
	ret = zcomp_compress(zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		zcomp_stream_put(zram->comp);
		pr_err("Compression failed! err=%d\n", ret);
		zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (comp_len >= huge_class_size)
		comp_len = PAGE_SIZE;
	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *  since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *  put per-cpu compression stream and, thus, to re-do
	 *  the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (handle)
			goto compress_again;
		return -ENOMEM;
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zcomp_stream_put(zram->comp);
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comp);
	zs_unmap_object(zram->mem_pool, handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);

	if (comp_len == PAGE_SIZE) {
		zram_set_flag(zram, index, ZRAM_HUGE);
		atomic64_inc(&zram->stats.huge_pages);
	}

	if (flags) {
		zram_set_flag(zram, index, flags);
		zram_set_element(zram, index, element);
	} else {
		zram_set_handle(zram, index, handle);
		zram_set_obj_size(zram, index, comp_len);
	}
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_inc(&zram->stats.pages_stored);
	return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page = NULL;
	void *src;
	struct bio_vec vec;

	vec = *bvec;
	if (is_partial_io(bvec)) {
		void *dst;
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;

		ret = __zram_bvec_read(zram, page, index, bio, true);
		if (ret)
			goto out;

		src = kmap_atomic(bvec->bv_page);
		dst = kmap_atomic(page);
		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		vec.bv_page = page;
		vec.bv_len = PAGE_SIZE;
		vec.bv_offset = 0;
	}

	ret = __zram_bvec_write(zram, &vec, index, bio);
out:
	if (is_partial_io(bvec))
		__free_page(page);
	return ret;
}
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;

	/*
	 * zram manages data in physical block size units. Because logical
	 * block size isn't identical with physical block size on some arch,
	 * we could get a discard request pointing to a specific offset within
	 * a certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
/*
 * Returns a negative errno on error; otherwise returns 0 or 1.
 * Returns 0 if IO request was done synchronously
 * Returns 1 if IO request was successfully submitted.
 */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, unsigned int op, struct bio *bio)
{
	int ret;

	if (!op_is_write(op)) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		flush_dcache_page(bvec->bv_page);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset, bio);
	}

	zram_slot_lock(zram, index);
	zram_accessed(zram, index);
	zram_slot_unlock(zram, index);

	if (unlikely(ret < 0)) {
		if (!op_is_write(op))
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;
	unsigned long start_time;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	default:
		break;
	}

	start_time = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		struct bio_vec bv = bvec;
		unsigned int unwritten = bvec.bv_len;

		do {
			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
							unwritten);
			if (zram_bvec_rw(zram, &bv, index, offset,
					bio_op(bio), bio) < 0) {
				bio->bi_status = BLK_STS_IOERR;
				break;
			}

			bv.bv_offset += bv.bv_len;
			unwritten -= bv.bv_len;

			update_position(&index, &offset, &bv);
		} while (unwritten);
	}
	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
}
/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_submit_bio(struct bio *bio)
{
	struct zram *zram = bio->bi_disk->private_data;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	return BLK_QC_T_NONE;

error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;

	atomic64_inc(&zram->stats.notify_free);
	if (!zram_slot_trylock(zram, index)) {
		atomic64_inc(&zram->stats.miss_free);
		return;
	}

	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	int offset, ret;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;
	unsigned long start_time;

	if (PageTransHuge(page))
		return -ENOTSUPP;
	zram = bdev->bd_disk->private_data;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		ret = -EINVAL;
		goto out;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	start_time = disk_start_io_acct(bdev->bd_disk, SECTORS_PER_PAGE, op);
	ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
	disk_end_io_acct(bdev->bd_disk, op, start_time);
out:
	/*
	 * If the I/O fails, just return the error (i.e., non-zero) without
	 * calling page_endio. That makes the callers of rw_page (e.g.,
	 * swap_readpage, __swap_writepage) resubmit the I/O as a bio
	 * request, and bio->bi_end_io then handles the error (e.g.,
	 * SetPageError, set_page_dirty and extra work).
	 */
	if (unlikely(ret < 0))
		return ret;

	switch (ret) {
	case 0:
		page_endio(page, op_is_write(op), 0);
		break;
	case 1:
		ret = 0;
		break;
	default:
		WARN_ON(ret);
	}
	return ret;
}
static void zram_reset_device(struct zram *zram)
{
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	comp = zram->comp;
	disksize = zram->disksize;
	zram->disksize = 0;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(zram, disksize);
	memset(&zram->stats, 0, sizeof(zram->stats));
	zcomp_destroy(comp);
	reset_bdev(zram);
}
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_unlock;
	}

	disksize = PAGE_ALIGN(disksize);
	if (!zram_meta_alloc(zram, disksize)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	revalidate_disk_size(zram->disk, true);
	up_write(&zram->init_lock);

	return len;

out_free_meta:
	zram_meta_free(zram, disksize);
out_unlock:
	up_write(&zram->init_lock);
	return err;
}
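
/*
 * Illustrative usage (zram0 is an example): size the device, then use it
 * as a swap device or as a regular block device:
 *
 *	echo 1G > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 */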
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device or claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/* Make sure all the pending I/O are finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	revalidate_disk_size(zram->disk, true);
	bdput(bdev);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);

	return len;
}
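
/*
 * Illustrative usage (zram0 is an example): the device must not be in use
 * (swapoff/umount first); reset frees all memory and clears the settings:
 *
 *	swapoff /dev/zram0
 *	echo 1 > /sys/block/zram0/reset
 */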
static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed to reset so open request fails */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.submit_bio = zram_submit_bio,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static const struct block_device_operations zram_wb_devops = {
	.open = zram_open,
	.submit_bio = zram_submit_bio,
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
#endif
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_idle.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_backing_dev.attr,
	&dev_attr_writeback.attr,
	&dev_attr_writeback_limit.attr,
	&dev_attr_writeback_limit_enable.attr,
#endif
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_bd_stat.attr,
#endif
	&dev_attr_debug_stat.attr,
	NULL,
};

static const struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static const struct attribute_group *zram_disk_attr_groups[] = {
	&zram_disk_attr_group,
	NULL,
};
/*
 * Allocate and initialize new zram device. The function returns
 * '>= 0' device_id upon success, and negative value otherwise.
 */
static int zram_add(void)
{
	struct zram *zram;
	struct request_queue *queue;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
	spin_lock_init(&zram->wb_limit_lock);
#endif
	queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_idr;
	}

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_err("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);

	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size(PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
	device_add_disk(NULL, zram->disk, zram_disk_attr_groups);

	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

	zram_debugfs_register(zram);
	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_free_queue:
	blk_cleanup_queue(queue);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}
static int zram_remove(struct zram *zram)
{
	struct block_device *bdev;

	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	zram_debugfs_unregister(zram);

	/* Make sure all the pending I/O are finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	bdput(bdev);

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	del_gendisk(zram->disk);
	blk_cleanup_queue(zram->disk->queue);
	put_disk(zram->disk);
	kfree(zram);
	return 0;
}
/* zram-control sysfs attributes */

/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
 * the sense that reading from this file does alter the state of your
 * system -- it creates a new un-initialized zram device and returns back
 * this device's device_id (or an error code if it fails to create a new
 * device).
 */
static ssize_t hot_add_show(struct class *class,
			struct class_attribute *attr,
			char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static struct class_attribute class_attr_hot_add =
	__ATTR(hot_add, 0400, hot_add_show, NULL);
static ssize_t hot_remove_store(struct class *class,
			struct class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);
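
/*
 * Illustrative usage of the zram-control class (the printed id and the
 * removed id are examples): reading hot_add creates a device and prints
 * its id; writing an id to hot_remove deletes that device:
 *
 *	cat /sys/class/zram-control/hot_add
 *	1
 *	echo 1 > /sys/class/zram-control/hot_remove
 */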
static struct attribute *zram_control_class_attrs[] = {
	&class_attr_hot_add.attr,
	&class_attr_hot_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
	.name		= "zram-control",
	.owner		= THIS_MODULE,
	.class_groups	= zram_control_class_groups,
};
static int zram_remove_cb(int id, void *ptr, void *data)
{
	zram_remove(ptr);
	return 0;
}

static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	zram_debugfs_destroy();
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
static int __init zram_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
	if (ret < 0)
		return ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return ret;
	}

	zram_debugfs_create();
	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices();
}
module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");