1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * zswap.c - zswap driver file
5 * zswap is a cache that takes pages that are in the process
6 * of being swapped out and attempts to compress and store them in a
7 * RAM-based memory pool. This can result in a significant I/O reduction on
8 * the swap device and, in the case where decompressing from RAM is faster
9 * than reading from the swap device, can also improve workload performance.
11 * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/module.h>
17 #include <linux/cpu.h>
18 #include <linux/highmem.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/types.h>
22 #include <linux/atomic.h>
23 #include <linux/rbtree.h>
24 #include <linux/swap.h>
25 #include <linux/crypto.h>
26 #include <linux/scatterlist.h>
27 #include <linux/mempolicy.h>
28 #include <linux/mempool.h>
29 #include <linux/zpool.h>
30 #include <crypto/acompress.h>
31 #include <linux/zswap.h>
32 #include <linux/mm_types.h>
33 #include <linux/page-flags.h>
34 #include <linux/swapops.h>
35 #include <linux/writeback.h>
36 #include <linux/pagemap.h>
37 #include <linux/workqueue.h>
38 #include <linux/list_lru.h>
43 /*********************************
45 **********************************/
46 /* Total bytes used by the compressed storage */
47 u64 zswap_pool_total_size;
48 /* The number of compressed pages currently stored in zswap */
49 atomic_t zswap_stored_pages = ATOMIC_INIT(0);
50 /* The number of same-value filled pages currently stored in zswap */
51 static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
54 * The statistics below are not protected from concurrent access for
55 * performance reasons, so they may not be 100% accurate. However,
56 * they do provide useful information on roughly how many times a
57 * certain event is occurring.
60 /* Pool limit was hit (see zswap_max_pool_percent) */
61 static u64 zswap_pool_limit_hit;
62 /* Pages written back when pool limit was reached */
63 static u64 zswap_written_back_pages;
64 /* Store failed due to a reclaim failure after pool limit was reached */
65 static u64 zswap_reject_reclaim_fail;
66 /* Store failed due to compression algorithm failure */
67 static u64 zswap_reject_compress_fail;
68 /* Compressed page was too big for the allocator to (optimally) store */
69 static u64 zswap_reject_compress_poor;
70 /* Store failed because underlying allocator could not get memory */
71 static u64 zswap_reject_alloc_fail;
72 /* Store failed because the entry metadata could not be allocated (rare) */
73 static u64 zswap_reject_kmemcache_fail;
74 /* Duplicate store was encountered (rare) */
75 static u64 zswap_duplicate_entry;
77 /* Shrinker work queue */
78 static struct workqueue_struct *shrink_wq;
79 /* Pool limit was hit, we need to calm down */
80 static bool zswap_pool_reached_full;
82 /*********************************
84 **********************************/
86 #define ZSWAP_PARAM_UNSET ""
88 static int zswap_setup(void);
90 /* Enable/disable zswap */
91 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
92 static int zswap_enabled_param_set(const char *,
93 const struct kernel_param *);
94 static const struct kernel_param_ops zswap_enabled_param_ops = {
95 .set = zswap_enabled_param_set,
96 .get = param_get_bool,
98 module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
100 /* Crypto compressor to use */
101 static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
102 static int zswap_compressor_param_set(const char *,
103 const struct kernel_param *);
104 static const struct kernel_param_ops zswap_compressor_param_ops = {
105 .set = zswap_compressor_param_set,
106 .get = param_get_charp,
107 .free = param_free_charp,
109 module_param_cb(compressor, &zswap_compressor_param_ops,
110 &zswap_compressor, 0644);
112 /* Compressed storage zpool to use */
113 static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
114 static int zswap_zpool_param_set(const char *, const struct kernel_param *);
115 static const struct kernel_param_ops zswap_zpool_param_ops = {
116 .set = zswap_zpool_param_set,
117 .get = param_get_charp,
118 .free = param_free_charp,
120 module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
122 /* The maximum percentage of memory that the compressed pool can occupy */
123 static unsigned int zswap_max_pool_percent = 20;
124 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
126 /* The threshold for accepting new pages after the max_pool_percent was hit */
127 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
128 module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
132 * Enable/disable handling same-value filled pages (enabled by default).
133 * If disabled, every page is considered non-same-value filled.
134 */
135 static bool zswap_same_filled_pages_enabled = true;
136 module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
139 /* Enable/disable handling non-same-value filled pages (enabled by default) */
140 static bool zswap_non_same_filled_pages_enabled = true;
141 module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
144 static bool zswap_exclusive_loads_enabled = IS_ENABLED(
145 CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
146 module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
148 /* Number of zpools in zswap_pool (empirically determined for scalability) */
149 #define ZSWAP_NR_ZPOOLS 32
151 /* Enable/disable memory pressure-based shrinker. */
152 static bool zswap_shrinker_enabled = IS_ENABLED(
153 CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
154 module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
156 bool is_zswap_enabled(void)
158 return zswap_enabled;
161 /*********************************
163 **********************************/
165 struct crypto_acomp_ctx {
166 struct crypto_acomp *acomp;
167 struct acomp_req *req;
168 struct crypto_wait wait;
169 u8 *buffer;
170 struct mutex mutex;
171 };
173 /*
174 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
175 * The only case where lru_lock is not acquired while holding tree.lock is
176 * when a zswap_entry is taken off the lru for writeback; in that case it
177 * needs to be verified that it's still valid in the tree.
178 */
179 struct zswap_pool {
180 struct zpool *zpools[ZSWAP_NR_ZPOOLS];
181 struct crypto_acomp_ctx __percpu *acomp_ctx;
183 struct list_head list;
184 struct work_struct release_work;
185 struct work_struct shrink_work;
186 struct hlist_node node;
187 char tfm_name[CRYPTO_MAX_ALG_NAME];
188 struct list_lru list_lru;
189 struct mem_cgroup *next_shrink;
190 struct shrinker *shrinker;
197 * This structure contains the metadata for tracking a single compressed
198 * page within zswap.
200 * rbnode - links the entry into red-black tree for the appropriate swap type
201 * swpentry - associated swap entry, the offset indexes into the red-black tree
202 * refcount - the number of outstanding references to the entry. This is needed
203 * to protect against premature freeing of the entry by code making
204 * concurrent calls to load, invalidate, and writeback. The lock
205 * for the zswap_tree structure that contains the entry must
206 * be held while changing the refcount. Since the lock must
207 * be held, there is no reason to also make refcount atomic.
208 * length - the length in bytes of the compressed page data. Needed during
209 * decompression. For a same-value filled page, length is 0, and both
210 * pool and lru are invalid and must be ignored.
211 * pool - the zswap_pool the entry's data is in
212 * handle - zpool allocation handle that stores the compressed page data
213 * value - the value of a same-value filled page (every word of the page holds this value)
214 * objcg - the obj_cgroup that the compressed memory is charged to
215 * lru - handle to the pool's lru used to evict pages.
216 */
217 struct zswap_entry {
218 struct rb_node rbnode;
219 swp_entry_t swpentry;
222 struct zswap_pool *pool;
224 unsigned long handle;
227 struct obj_cgroup *objcg;
228 struct list_head lru;
232 * The tree lock in the zswap_tree struct protects a few things:
233 * - the rbtree
234 * - the refcount field of each entry in the tree
235 */
236 struct zswap_tree {
237 struct rb_root rbroot;
238 spinlock_t lock;
239 };
241 static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
243 /* RCU-protected iteration */
244 static LIST_HEAD(zswap_pools);
245 /* protects zswap_pools list modification */
246 static DEFINE_SPINLOCK(zswap_pools_lock);
247 /* pool counter to provide unique names to zpool */
248 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
250 enum zswap_init_type {
256 static enum zswap_init_type zswap_init_state;
258 /* used to ensure the integrity of initialization */
259 static DEFINE_MUTEX(zswap_init_lock);
261 /* init completed, but couldn't create the initial pool */
262 static bool zswap_has_pool;
264 /*********************************
265 * helpers and fwd declarations
266 **********************************/
268 #define zswap_pool_debug(msg, p) \
269 pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \
270 zpool_get_type((p)->zpools[0]))
272 static int zswap_writeback_entry(struct zswap_entry *entry,
273 struct zswap_tree *tree);
274 static int zswap_pool_get(struct zswap_pool *pool);
275 static void zswap_pool_put(struct zswap_pool *pool);
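/* true once the compressed pool, measured in pages, exceeds max_pool_percent of total RAM */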
277 static bool zswap_is_full(void)
279 return totalram_pages() * zswap_max_pool_percent / 100 <
280 DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
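/*
* Once the limit has been hit, new stores are only accepted again when usage
* drops below accept_thr_percent of the maximum pool size (with the defaults
* of max_pool_percent = 20 and accept_thr_percent = 90, that is roughly 18%
* of total RAM).
*/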
283 static bool zswap_can_accept(void)
285 return totalram_pages() * zswap_accept_thr_percent / 100 *
286 zswap_max_pool_percent / 100 >
287 DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
290 static u64 get_zswap_pool_size(struct zswap_pool *pool)
295 for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
296 pool_size += zpool_get_total_size(pool->zpools[i]);
301 static void zswap_update_total_size(void)
303 struct zswap_pool *pool;
308 list_for_each_entry_rcu(pool, &zswap_pools, list)
309 total += get_zswap_pool_size(pool);
313 zswap_pool_total_size = total;
316 /* should be called under RCU */
317 #ifdef CONFIG_MEMCG_KMEM
318 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
319 {
320 return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
321 }
322 #else
323 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
324 {
325 return NULL;
326 }
327 #endif
329 static inline int entry_to_nid(struct zswap_entry *entry)
331 return page_to_nid(virt_to_page(entry));
334 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
336 struct zswap_pool *pool;
338 /* lock out zswap pools list modification */
339 spin_lock(&zswap_pools_lock);
340 list_for_each_entry(pool, &zswap_pools, list) {
341 if (pool->next_shrink == memcg)
342 pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
344 spin_unlock(&zswap_pools_lock);
347 /*********************************
348 * zswap entry functions
349 **********************************/
350 static struct kmem_cache *zswap_entry_cache;
352 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
354 struct zswap_entry *entry;
355 entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
359 RB_CLEAR_NODE(&entry->rbnode);
363 static void zswap_entry_cache_free(struct zswap_entry *entry)
365 kmem_cache_free(zswap_entry_cache, entry);
368 /*********************************
369 * zswap lruvec functions
370 **********************************/
371 void zswap_lruvec_state_init(struct lruvec *lruvec)
373 atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
376 void zswap_folio_swapin(struct folio *folio)
378 struct lruvec *lruvec;
380 VM_WARN_ON_ONCE(!folio_test_locked(folio));
381 lruvec = folio_lruvec(folio);
382 atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
385 /*********************************
387 **********************************/
388 static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
390 atomic_long_t *nr_zswap_protected;
391 unsigned long lru_size, old, new;
392 int nid = entry_to_nid(entry);
393 struct mem_cgroup *memcg;
394 struct lruvec *lruvec;
397 * Note that it is safe to use rcu_read_lock() here, even in the face of
398 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
399 * used in list_lru lookup, only two scenarios are possible:
401 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
402 * new entry will be reparented to memcg's parent's list_lru.
403 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
404 * new entry will be added directly to memcg's parent's list_lru.
406 * Similar reasoning holds for list_lru_del() and list_lru_putback().
409 memcg = mem_cgroup_from_entry(entry);
410 /* will always succeed */
411 list_lru_add(list_lru, &entry->lru, nid, memcg);
413 /* Update the protection area */
414 lru_size = list_lru_count_one(list_lru, nid, memcg);
415 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
416 nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
417 old = atomic_long_inc_return(nr_zswap_protected);
419 * Decay to avoid overflow and adapt to changing workloads.
420 * This is based on LRU reclaim cost decaying heuristics.
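* (i.e. once the protected count exceeds a quarter of the LRU size, halve it)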
423 new = old > lru_size / 4 ? old / 2 : old;
424 } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
428 static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
430 int nid = entry_to_nid(entry);
431 struct mem_cgroup *memcg;
434 memcg = mem_cgroup_from_entry(entry);
435 /* will always succeed */
436 list_lru_del(list_lru, &entry->lru, nid, memcg);
440 static void zswap_lru_putback(struct list_lru *list_lru,
441 struct zswap_entry *entry)
443 int nid = entry_to_nid(entry);
444 spinlock_t *lock = &list_lru->node[nid].lock;
445 struct mem_cgroup *memcg;
446 struct lruvec *lruvec;
449 memcg = mem_cgroup_from_entry(entry);
451 /* we cannot use list_lru_add here, because it increments node's lru count */
452 list_lru_putback(list_lru, &entry->lru, nid, memcg);
455 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(entry_to_nid(entry)));
456 /* increment the protection area to account for the LRU rotation. */
457 atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
461 /*********************************
463 **********************************/
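/* Look up the entry at @offset in the per-swap-type rbtree; returns NULL if
* not found. The caller must hold the tree lock.
*/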
464 static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
466 struct rb_node *node = root->rb_node;
467 struct zswap_entry *entry;
468 pgoff_t entry_offset;
471 entry = rb_entry(node, struct zswap_entry, rbnode);
472 entry_offset = swp_offset(entry->swpentry);
473 if (entry_offset > offset)
474 node = node->rb_left;
475 else if (entry_offset < offset)
476 node = node->rb_right;
484 * In the case that an entry with the same offset is found, a pointer to
485 * the existing entry is stored in dupentry and the function returns -EEXIST.
486 */
487 static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
488 struct zswap_entry **dupentry)
490 struct rb_node **link = &root->rb_node, *parent = NULL;
491 struct zswap_entry *myentry;
492 pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
496 myentry = rb_entry(parent, struct zswap_entry, rbnode);
497 myentry_offset = swp_offset(myentry->swpentry);
498 if (myentry_offset > entry_offset)
499 link = &(*link)->rb_left;
500 else if (myentry_offset < entry_offset)
501 link = &(*link)->rb_right;
507 rb_link_node(&entry->rbnode, parent, link);
508 rb_insert_color(&entry->rbnode, root);
512 static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
514 if (!RB_EMPTY_NODE(&entry->rbnode)) {
515 rb_erase(&entry->rbnode, root);
516 RB_CLEAR_NODE(&entry->rbnode);
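/* Entries are spread over ZSWAP_NR_ZPOOLS zpools to reduce lock contention;
* pick one by hashing the entry pointer.
*/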
522 static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
526 if (ZSWAP_NR_ZPOOLS > 1)
527 i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));
529 return entry->pool->zpools[i];
533 * Carries out the common pattern of freeing an entry's zpool allocation,
534 * freeing the entry itself, and decrementing the number of stored pages.
536 static void zswap_free_entry(struct zswap_entry *entry)
539 atomic_dec(&zswap_same_filled_pages);
541 zswap_lru_del(&entry->pool->list_lru, entry);
542 zpool_free(zswap_find_zpool(entry), entry->handle);
543 atomic_dec(&entry->pool->nr_stored);
544 zswap_pool_put(entry->pool);
547 obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
548 obj_cgroup_put(entry->objcg);
550 zswap_entry_cache_free(entry);
551 atomic_dec(&zswap_stored_pages);
552 zswap_update_total_size();
555 /* caller must hold the tree lock */
556 static void zswap_entry_get(struct zswap_entry *entry)
561 /* caller must hold the tree lock
562 * remove from the tree and free it, if nobody references the entry
563 */
564 static void zswap_entry_put(struct zswap_tree *tree,
565 struct zswap_entry *entry)
567 int refcount = --entry->refcount;
569 WARN_ON_ONCE(refcount < 0);
570 if (refcount == 0) {
571 WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode));
572 zswap_free_entry(entry);
573 }
574 }
576 /* caller must hold the tree lock */
577 static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
580 struct zswap_entry *entry;
582 entry = zswap_rb_search(root, offset);
583 if (entry)
584 zswap_entry_get(entry);
589 /*********************************
591 **********************************/
592 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
593 spinlock_t *lock, void *arg);
595 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
596 struct shrink_control *sc)
598 struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
599 unsigned long shrink_ret, nr_protected, lru_size;
600 struct zswap_pool *pool = shrinker->private_data;
601 bool encountered_page_in_swapcache = false;
603 if (!zswap_shrinker_enabled ||
604 !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
610 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
611 lru_size = list_lru_shrink_count(&pool->list_lru, sc);
614 * Abort if we are shrinking into the protected region.
616 * This short-circuiting is necessary because if we have too many
617 * concurrent reclaimers getting the freeable zswap object counts at the
618 * same time (before any of them has made reasonable progress), the total
619 * number of reclaimed objects might be more than the number of unprotected
620 * objects (i.e. the reclaimers will reclaim into the protected area of the
621 * zswap LRU).
622 */
623 if (nr_protected >= lru_size - sc->nr_to_scan) {
628 shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
629 &encountered_page_in_swapcache);
631 if (encountered_page_in_swapcache)
634 return shrink_ret ? shrink_ret : SHRINK_STOP;
637 static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
638 struct shrink_control *sc)
640 struct zswap_pool *pool = shrinker->private_data;
641 struct mem_cgroup *memcg = sc->memcg;
642 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
643 unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
645 if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
649 * The shrinker resumes swap writeback, which will enter block
650 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
651 * rules (may_enter_fs()), which apply on a per-folio basis.
653 if (!gfp_has_io_fs(sc->gfp_mask))
657 * For memcg, use the cgroup-wide ZSWAP stats since we don't
658 * have them per-node and thus per-lruvec. Careful if memcg is
659 * runtime-disabled: we can get sc->memcg == NULL, which is ok
660 * for the lruvec, but not for memcg_page_state().
662 * Without memcg, use the zswap pool-wide metrics.
664 if (!mem_cgroup_disabled()) {
665 mem_cgroup_flush_stats(memcg);
666 nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
667 nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
669 nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
670 nr_stored = atomic_read(&pool->nr_stored);
677 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
678 nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
680 * Subtract from the lru size an estimate of the number of pages
681 * that should be protected.
682 */
683 nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
686 * Scale the number of freeable pages by the memory saving factor.
687 * This ensures that the better zswap compresses memory, the fewer
688 * pages we will evict to swap (as it will otherwise incur IO for
689 * relatively small memory saving).
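* For example (illustrative numbers): with 1000 freeable objects stored at a
* 4:1 compression ratio (1000 stored pages backed by 250 pages of pool
* memory), this reports mult_frac(1000, 250, 1000) = 250 freeable pages.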
691 return mult_frac(nr_freeable, nr_backing, nr_stored);
694 static void zswap_alloc_shrinker(struct zswap_pool *pool)
697 shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
701 pool->shrinker->private_data = pool;
702 pool->shrinker->scan_objects = zswap_shrinker_scan;
703 pool->shrinker->count_objects = zswap_shrinker_count;
704 pool->shrinker->batch = 0;
705 pool->shrinker->seeks = DEFAULT_SEEKS;
708 /*********************************
710 **********************************/
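/* CPU hotplug callbacks: set up and tear down the per-CPU compression state
* (a two-page scratch buffer plus an acomp transform and request) for a pool.
*/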
711 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
713 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
714 struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
715 struct crypto_acomp *acomp;
716 struct acomp_req *req;
719 mutex_init(&acomp_ctx->mutex);
721 acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
722 if (!acomp_ctx->buffer)
725 acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
727 pr_err("could not alloc crypto acomp %s : %ld\n",
728 pool->tfm_name, PTR_ERR(acomp));
729 ret = PTR_ERR(acomp);
732 acomp_ctx->acomp = acomp;
734 req = acomp_request_alloc(acomp_ctx->acomp);
736 pr_err("could not alloc crypto acomp_request %s\n",
741 acomp_ctx->req = req;
743 crypto_init_wait(&acomp_ctx->wait);
745 * if the backend of acomp is an async zip, crypto_req_done() will wake up
746 * crypto_wait_req(); if the backend of acomp is scomp, the callback
747 * won't be called and crypto_wait_req() will return without blocking.
748 */
749 acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
750 crypto_req_done, &acomp_ctx->wait);
755 crypto_free_acomp(acomp_ctx->acomp);
757 kfree(acomp_ctx->buffer);
761 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
763 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
764 struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
766 if (!IS_ERR_OR_NULL(acomp_ctx)) {
767 if (!IS_ERR_OR_NULL(acomp_ctx->req))
768 acomp_request_free(acomp_ctx->req);
769 if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
770 crypto_free_acomp(acomp_ctx->acomp);
771 kfree(acomp_ctx->buffer);
777 /*********************************
779 **********************************/
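/* The current pool, i.e. the one new stores go to, is the first entry on the
* RCU-protected zswap_pools list.
*/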
781 static struct zswap_pool *__zswap_pool_current(void)
783 struct zswap_pool *pool;
785 pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
786 WARN_ONCE(!pool && zswap_has_pool,
787 "%s: no page storage pool!\n", __func__);
792 static struct zswap_pool *zswap_pool_current(void)
794 assert_spin_locked(&zswap_pools_lock);
796 return __zswap_pool_current();
799 static struct zswap_pool *zswap_pool_current_get(void)
801 struct zswap_pool *pool;
805 pool = __zswap_pool_current();
806 if (!zswap_pool_get(pool))
814 static struct zswap_pool *zswap_pool_last_get(void)
816 struct zswap_pool *pool, *last = NULL;
820 list_for_each_entry_rcu(pool, &zswap_pools, list)
821 last = pool;
822 WARN_ONCE(!last && zswap_has_pool,
823 "%s: no page storage pool!\n", __func__);
824 if (!zswap_pool_get(last))
832 /* type and compressor must be null-terminated */
833 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
835 struct zswap_pool *pool;
837 assert_spin_locked(&zswap_pools_lock);
839 list_for_each_entry_rcu(pool, &zswap_pools, list) {
840 if (strcmp(pool->tfm_name, compressor))
842 /* all zpools share the same type */
843 if (strcmp(zpool_get_type(pool->zpools[0]), type))
845 /* if we can't get it, it's about to be destroyed */
846 if (!zswap_pool_get(pool))
855 * If the entry is still valid in the tree, drop the initial ref and remove it
856 * from the tree. This function must be called with an additional ref held,
857 * otherwise it may race with another invalidation freeing the entry.
859 static void zswap_invalidate_entry(struct zswap_tree *tree,
860 struct zswap_entry *entry)
862 if (zswap_rb_erase(&tree->rbroot, entry))
863 zswap_entry_put(tree, entry);
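/* list_lru walk callback: isolate one entry from the LRU and try to write it
* back to the swap device.
*/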
866 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
867 spinlock_t *lock, void *arg)
869 struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
870 bool *encountered_page_in_swapcache = (bool *)arg;
871 struct zswap_tree *tree;
873 enum lru_status ret = LRU_REMOVED_RETRY;
874 int writeback_result;
877 * Once the lru lock is dropped, the entry might get freed. The
878 * swpoffset is copied to the stack, and entry isn't deref'd again
879 * until the entry is verified to still be alive in the tree.
881 swpoffset = swp_offset(entry->swpentry);
882 tree = zswap_trees[swp_type(entry->swpentry)];
883 list_lru_isolate(l, item);
885 * It's safe to drop the lock here because we return either
886 * LRU_REMOVED_RETRY or LRU_RETRY.
890 /* Check for invalidate() race */
891 spin_lock(&tree->lock);
892 if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
895 /* Hold a reference to prevent a free during writeback */
896 zswap_entry_get(entry);
897 spin_unlock(&tree->lock);
899 writeback_result = zswap_writeback_entry(entry, tree);
901 spin_lock(&tree->lock);
902 if (writeback_result) {
903 zswap_reject_reclaim_fail++;
904 zswap_lru_putback(&entry->pool->list_lru, entry);
908 * Encountering a page already in swap cache is a sign that we are shrinking
909 * into the warmer region. We should terminate shrinking (if we're in the dynamic
910 * shrinker context).
911 */
912 if (writeback_result == -EEXIST && encountered_page_in_swapcache)
913 *encountered_page_in_swapcache = true;
917 zswap_written_back_pages++;
920 count_objcg_event(entry->objcg, ZSWPWB);
922 count_vm_event(ZSWPWB);
924 * Writeback started successfully, the page now belongs to the
925 * swapcache. Drop the entry from zswap - unless invalidate already
926 * took it out while we had the tree->lock released for IO.
928 zswap_invalidate_entry(tree, entry);
931 /* Drop local reference */
932 zswap_entry_put(tree, entry);
934 spin_unlock(&tree->lock);
939 static int shrink_memcg(struct mem_cgroup *memcg)
941 struct zswap_pool *pool;
944 if (!mem_cgroup_zswap_writeback_enabled(memcg))
948 * Skip zombies because their LRUs are reparented and we would be
949 * reclaiming from the parent instead of the dead memcg.
951 if (memcg && !mem_cgroup_online(memcg))
954 pool = zswap_pool_current_get();
958 for_each_node_state(nid, N_NORMAL_MEMORY) {
959 unsigned long nr_to_walk = 1;
961 shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
962 &shrink_memcg_cb, NULL, &nr_to_walk);
964 zswap_pool_put(pool);
965 return shrunk ? 0 : -EAGAIN;
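/* Background reclaim, queued when the pool limit is hit: walk memcgs in a
* round-robin fashion and write back entries until the pool can accept new
* pages again.
*/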
968 static void shrink_worker(struct work_struct *w)
970 struct zswap_pool *pool = container_of(w, typeof(*pool),
972 struct mem_cgroup *memcg;
973 int ret, failures = 0;
975 /* global reclaim will select cgroup in a round-robin fashion. */
977 spin_lock(&zswap_pools_lock);
978 pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
979 memcg = pool->next_shrink;
982 * We need to retry if we have gone through a full round trip, or if we
983 * got an offline memcg (or else we risk undoing the effect of the
984 * zswap memcg offlining cleanup callback). This is not catastrophic
985 * per se, but it will keep the now offlined memcg hostage for a while.
987 * Note that if we got an online memcg, we will keep the extra
988 * reference in case the original reference obtained by mem_cgroup_iter
989 * is dropped by the zswap memcg offlining callback, ensuring that the
990 * memcg is not killed when we are reclaiming.
993 spin_unlock(&zswap_pools_lock);
994 if (++failures == MAX_RECLAIM_RETRIES)
1000 if (!mem_cgroup_tryget_online(memcg)) {
1001 /* drop the reference from mem_cgroup_iter() */
1002 mem_cgroup_iter_break(NULL, memcg);
1003 pool->next_shrink = NULL;
1004 spin_unlock(&zswap_pools_lock);
1006 if (++failures == MAX_RECLAIM_RETRIES)
1011 spin_unlock(&zswap_pools_lock);
1013 ret = shrink_memcg(memcg);
1014 /* drop the extra reference */
1015 mem_cgroup_put(memcg);
1019 if (ret && ++failures == MAX_RECLAIM_RETRIES)
1024 } while (!zswap_can_accept());
1025 zswap_pool_put(pool);
1028 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
1031 struct zswap_pool *pool;
1032 char name[38]; /* 'zswap' + 32 char (max) num + \0 */
1033 gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1036 if (!zswap_has_pool) {
1037 /* if either is unset, pool initialization failed, and we
1038 * need both params to be set correctly before trying to
1039 * create a pool.
1040 */
1041 if (!strcmp(type, ZSWAP_PARAM_UNSET))
1043 if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
1047 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
1051 for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
1052 /* unique name for each pool specifically required by zsmalloc */
1053 snprintf(name, 38, "zswap%x",
1054 atomic_inc_return(&zswap_pools_count));
1056 pool->zpools[i] = zpool_create_pool(type, name, gfp);
1057 if (!pool->zpools[i]) {
1058 pr_err("%s zpool not available\n", type);
1062 pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
1064 strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
1066 pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
1067 if (!pool->acomp_ctx) {
1068 pr_err("percpu alloc failed\n");
1072 ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
1077 zswap_alloc_shrinker(pool);
1078 if (!pool->shrinker)
1081 pr_debug("using %s compressor\n", pool->tfm_name);
1083 /* being the current pool takes 1 ref; this func expects the
1084 * caller to always add the new pool as the current pool
1086 kref_init(&pool->kref);
1087 INIT_LIST_HEAD(&pool->list);
1088 if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
1090 shrinker_register(pool->shrinker);
1091 INIT_WORK(&pool->shrink_work, shrink_worker);
1092 atomic_set(&pool->nr_stored, 0);
1094 zswap_pool_debug("created", pool);
1099 list_lru_destroy(&pool->list_lru);
1100 shrinker_free(pool->shrinker);
1102 if (pool->acomp_ctx)
1103 free_percpu(pool->acomp_ctx);
1105 zpool_destroy_pool(pool->zpools[i]);
1110 static struct zswap_pool *__zswap_pool_create_fallback(void)
1112 bool has_comp, has_zpool;
1114 has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
1115 if (!has_comp && strcmp(zswap_compressor,
1116 CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
1117 pr_err("compressor %s not available, using default %s\n",
1118 zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
1119 param_free_charp(&zswap_compressor);
1120 zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
1121 has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
1124 pr_err("default compressor %s not available\n",
1126 param_free_charp(&zswap_compressor);
1127 zswap_compressor = ZSWAP_PARAM_UNSET;
1130 has_zpool = zpool_has_pool(zswap_zpool_type);
1131 if (!has_zpool && strcmp(zswap_zpool_type,
1132 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
1133 pr_err("zpool %s not available, using default %s\n",
1134 zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
1135 param_free_charp(&zswap_zpool_type);
1136 zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
1137 has_zpool = zpool_has_pool(zswap_zpool_type);
1140 pr_err("default zpool %s not available\n",
1142 param_free_charp(&zswap_zpool_type);
1143 zswap_zpool_type = ZSWAP_PARAM_UNSET;
1146 if (!has_comp || !has_zpool)
1149 return zswap_pool_create(zswap_zpool_type, zswap_compressor);
1152 static void zswap_pool_destroy(struct zswap_pool *pool)
1156 zswap_pool_debug("destroying", pool);
1158 shrinker_free(pool->shrinker);
1159 cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
1160 free_percpu(pool->acomp_ctx);
1161 list_lru_destroy(&pool->list_lru);
1163 spin_lock(&zswap_pools_lock);
1164 mem_cgroup_iter_break(NULL, pool->next_shrink);
1165 pool->next_shrink = NULL;
1166 spin_unlock(&zswap_pools_lock);
1168 for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
1169 zpool_destroy_pool(pool->zpools[i]);
1173 static int __must_check zswap_pool_get(struct zswap_pool *pool)
1178 return kref_get_unless_zero(&pool->kref);
1181 static void __zswap_pool_release(struct work_struct *work)
1183 struct zswap_pool *pool = container_of(work, typeof(*pool),
1188 /* nobody should have been able to get a kref... */
1189 WARN_ON(kref_get_unless_zero(&pool->kref));
1191 /* pool is now off zswap_pools list and has no references. */
1192 zswap_pool_destroy(pool);
1195 static void __zswap_pool_empty(struct kref *kref)
1197 struct zswap_pool *pool;
1199 pool = container_of(kref, typeof(*pool), kref);
1201 spin_lock(&zswap_pools_lock);
1203 WARN_ON(pool == zswap_pool_current());
1205 list_del_rcu(&pool->list);
1207 INIT_WORK(&pool->release_work, __zswap_pool_release);
1208 schedule_work(&pool->release_work);
1210 spin_unlock(&zswap_pools_lock);
1213 static void zswap_pool_put(struct zswap_pool *pool)
1215 kref_put(&pool->kref, __zswap_pool_empty);
1218 /*********************************
1220 **********************************/
1222 static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
1224 /* no change required */
1225 if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
1230 /* val must be a null-terminated string */
1231 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
1232 char *type, char *compressor)
1234 struct zswap_pool *pool, *put_pool = NULL;
1235 char *s = strstrip((char *)val);
1237 bool new_pool = false;
1239 mutex_lock(&zswap_init_lock);
1240 switch (zswap_init_state) {
1242 /* if this is load-time (pre-init) param setting,
1243 * don't create a pool; that's done during init.
1245 ret = param_set_charp(s, kp);
1247 case ZSWAP_INIT_SUCCEED:
1248 new_pool = zswap_pool_changed(s, kp);
1250 case ZSWAP_INIT_FAILED:
1251 pr_err("can't set param, initialization failed\n");
1254 mutex_unlock(&zswap_init_lock);
1256 /* no need to create a new pool, return directly */
1261 if (!zpool_has_pool(s)) {
1262 pr_err("zpool %s not available\n", s);
1266 } else if (!compressor) {
1267 if (!crypto_has_acomp(s, 0, 0)) {
1268 pr_err("compressor %s not available\n", s);
1277 spin_lock(&zswap_pools_lock);
1279 pool = zswap_pool_find_get(type, compressor);
1281 zswap_pool_debug("using existing", pool);
1282 WARN_ON(pool == zswap_pool_current());
1283 list_del_rcu(&pool->list);
1286 spin_unlock(&zswap_pools_lock);
1289 pool = zswap_pool_create(type, compressor);
1292 ret = param_set_charp(s, kp);
1296 spin_lock(&zswap_pools_lock);
1299 put_pool = zswap_pool_current();
1300 list_add_rcu(&pool->list, &zswap_pools);
1301 zswap_has_pool = true;
1303 /* add the possibly pre-existing pool to the end of the pools
1304 * list; if it's new (and empty) then it'll be removed and
1305 * destroyed by the put after we drop the lock
1307 list_add_tail_rcu(&pool->list, &zswap_pools);
1311 spin_unlock(&zswap_pools_lock);
1313 if (!zswap_has_pool && !pool) {
1314 /* if initial pool creation failed, and this pool creation also
1315 * failed, maybe both compressor and zpool params were bad.
1316 * Allow changing this param, so pool creation will succeed
1317 * when the other param is changed. We already verified this
1318 * param is ok in the zpool_has_pool() or crypto_has_acomp()
1319 * checks above.
1320 */
1321 ret = param_set_charp(s, kp);
1324 /* drop the ref from either the old current pool,
1325 * or the new pool we failed to add
1328 zswap_pool_put(put_pool);
1333 static int zswap_compressor_param_set(const char *val,
1334 const struct kernel_param *kp)
1336 return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
1339 static int zswap_zpool_param_set(const char *val,
1340 const struct kernel_param *kp)
1342 return __zswap_param_set(val, kp, NULL, zswap_compressor);
1345 static int zswap_enabled_param_set(const char *val,
1346 const struct kernel_param *kp)
1350 /* if this is load-time (pre-init) param setting, only set param. */
1351 if (system_state != SYSTEM_RUNNING)
1352 return param_set_bool(val, kp);
1354 mutex_lock(&zswap_init_lock);
1355 switch (zswap_init_state) {
1360 case ZSWAP_INIT_SUCCEED:
1361 if (!zswap_has_pool)
1362 pr_err("can't enable, no pool configured\n");
1364 ret = param_set_bool(val, kp);
1366 case ZSWAP_INIT_FAILED:
1367 pr_err("can't enable, initialization failed\n");
1369 mutex_unlock(&zswap_init_lock);
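/* Decompress the entry's zpool data into @page. If the zpool mapping cannot
* be held across a potentially sleeping decompression, the data is first
* copied into the per-CPU buffer.
*/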
1374 static void __zswap_load(struct zswap_entry *entry, struct page *page)
1376 struct zpool *zpool = zswap_find_zpool(entry);
1377 struct scatterlist input, output;
1378 struct crypto_acomp_ctx *acomp_ctx;
1381 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1382 mutex_lock(&acomp_ctx->mutex);
1384 src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
1385 if (!zpool_can_sleep_mapped(zpool)) {
1386 memcpy(acomp_ctx->buffer, src, entry->length);
1387 src = acomp_ctx->buffer;
1388 zpool_unmap_handle(zpool, entry->handle);
1391 sg_init_one(&input, src, entry->length);
1392 sg_init_table(&output, 1);
1393 sg_set_page(&output, page, PAGE_SIZE, 0);
1394 acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1395 BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1396 BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1397 mutex_unlock(&acomp_ctx->mutex);
1399 if (zpool_can_sleep_mapped(zpool))
1400 zpool_unmap_handle(zpool, entry->handle);
1403 /*********************************
1405 **********************************/
1407 * Attempts to free an entry by adding a folio to the swap cache,
1408 * decompressing the entry data into the folio, and issuing a
1409 * bio write to write the folio back to the swap device.
1411 * This can be thought of as a "resumed writeback" of the folio
1412 * to the swap device. We are basically resuming the same swap
1413 * writeback path that was intercepted with the zswap_store()
1414 * in the first place. After the folio has been decompressed into
1415 * the swap cache, the compressed version stored by zswap can be
1418 static int zswap_writeback_entry(struct zswap_entry *entry,
1419 struct zswap_tree *tree)
1421 swp_entry_t swpentry = entry->swpentry;
1422 struct folio *folio;
1423 struct mempolicy *mpol;
1424 bool folio_was_allocated;
1425 struct writeback_control wbc = {
1426 .sync_mode = WB_SYNC_NONE,
1429 /* try to allocate swap cache folio */
1430 mpol = get_task_policy(current);
1431 folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1432 NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1437 * Found an existing folio: we raced with load/swapin. We generally
1438 * writeback cold folios from zswap, and swapin means the folio just
1439 * became hot. Skip this folio and let the caller find another one.
1441 if (!folio_was_allocated) {
1447 * folio is locked, and the swapcache is now secured against
1448 * concurrent swapping to and from the slot. Verify that the
1449 * swap entry hasn't been invalidated and recycled behind our
1450 * backs (our zswap_entry reference doesn't prevent that), to
1451 * avoid overwriting a new swap folio with old compressed data.
1453 spin_lock(&tree->lock);
1454 if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
1455 spin_unlock(&tree->lock);
1456 delete_from_swap_cache(folio);
1457 folio_unlock(folio);
1461 spin_unlock(&tree->lock);
1463 __zswap_load(entry, &folio->page);
1465 /* folio is up to date */
1466 folio_mark_uptodate(folio);
1468 /* move it to the tail of the inactive list after end_writeback */
1469 folio_set_reclaim(folio);
1471 /* start writeback */
1472 __swap_writepage(folio, &wbc);
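/* Returns true if the page consists of a single repeating word; that word is
* returned in *value so the page can be stored without compression.
*/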
1478 static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
1480 unsigned long *page;
1482 unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
1484 page = (unsigned long *)ptr;
1487 if (val != page[last_pos])
1490 for (pos = 1; pos < last_pos; pos++) {
1491 if (val != page[pos])
1500 static void zswap_fill_page(void *ptr, unsigned long value)
1502 unsigned long *page;
1504 page = (unsigned long *)ptr;
1505 memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
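/* Store path: drop any duplicate entry for this offset, enforce the cgroup
* and pool limits, try same-filled detection, otherwise compress the page
* into a zpool, then insert the entry into the tree and LRU.
*/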
1508 bool zswap_store(struct folio *folio)
1510 swp_entry_t swp = folio->swap;
1511 int type = swp_type(swp);
1512 pgoff_t offset = swp_offset(swp);
1513 struct page *page = &folio->page;
1514 struct zswap_tree *tree = zswap_trees[type];
1515 struct zswap_entry *entry, *dupentry;
1516 struct scatterlist input, output;
1517 struct crypto_acomp_ctx *acomp_ctx;
1518 struct obj_cgroup *objcg = NULL;
1519 struct mem_cgroup *memcg = NULL;
1520 struct zswap_pool *pool;
1521 struct zpool *zpool;
1522 unsigned int dlen = PAGE_SIZE;
1523 unsigned long handle, value;
1529 VM_WARN_ON_ONCE(!folio_test_locked(folio));
1530 VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1532 /* Large folios aren't supported */
1533 if (folio_test_large(folio))
1540 * If this is a duplicate, it must be removed before attempting to store
1541 * it; otherwise, if the store fails, the old page won't be removed from
1542 * the tree and it might be written back, overwriting the new data.
1543 */
1544 spin_lock(&tree->lock);
1545 dupentry = zswap_rb_search(&tree->rbroot, offset);
1547 zswap_duplicate_entry++;
1548 zswap_invalidate_entry(tree, dupentry);
1550 spin_unlock(&tree->lock);
1555 objcg = get_obj_cgroup_from_folio(folio);
1556 if (objcg && !obj_cgroup_may_zswap(objcg)) {
1557 memcg = get_mem_cgroup_from_objcg(objcg);
1558 if (shrink_memcg(memcg)) {
1559 mem_cgroup_put(memcg);
1562 mem_cgroup_put(memcg);
1565 /* reclaim space if needed */
1566 if (zswap_is_full()) {
1567 zswap_pool_limit_hit++;
1568 zswap_pool_reached_full = true;
1572 if (zswap_pool_reached_full) {
1573 if (!zswap_can_accept())
1576 zswap_pool_reached_full = false;
1579 /* allocate entry */
1580 entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
1582 zswap_reject_kmemcache_fail++;
1586 if (zswap_same_filled_pages_enabled) {
1587 src = kmap_local_page(page);
1588 if (zswap_is_page_same_filled(src, &value)) {
1590 entry->swpentry = swp_entry(type, offset);
1592 entry->value = value;
1593 atomic_inc(&zswap_same_filled_pages);
1599 if (!zswap_non_same_filled_pages_enabled)
1602 /* if entry is successfully added, it keeps the reference */
1603 entry->pool = zswap_pool_current_get();
1608 memcg = get_mem_cgroup_from_objcg(objcg);
1609 if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
1610 mem_cgroup_put(memcg);
1613 mem_cgroup_put(memcg);
1617 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1619 mutex_lock(&acomp_ctx->mutex);
1621 dst = acomp_ctx->buffer;
1622 sg_init_table(&input, 1);
1623 sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
1626 * We need PAGE_SIZE * 2 here since the compressed output may be larger than
1627 * the input (over-compression), and hardware accelerators may not check the
1628 * dst buffer size, so give the dst buffer enough length to avoid overflow.
1629 */
1630 sg_init_one(&output, dst, PAGE_SIZE * 2);
1631 acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1633 * It may look a little silly that we send an asynchronous request and then
1634 * wait for its completion synchronously; in effect the process is
1635 * synchronous.
1636 * Theoretically, acomp supports submitting multiple requests on one acomp
1637 * instance and having them completed simultaneously, but zswap stores and
1638 * loads pages one at a time, so there is no way for a single thread doing
1639 * zswap to submit a second page before the first one is done.
1640 * However, different threads running on different CPUs use different acomp
1641 * instances, so multiple threads can do (de)compression in parallel.
1642 */
1644 ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1645 dlen = acomp_ctx->req->dlen;
1648 zswap_reject_compress_fail++;
1653 zpool = zswap_find_zpool(entry);
1654 gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1655 if (zpool_malloc_support_movable(zpool))
1656 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
1657 ret = zpool_malloc(zpool, dlen, gfp, &handle);
1658 if (ret == -ENOSPC) {
1659 zswap_reject_compress_poor++;
1663 zswap_reject_alloc_fail++;
1666 buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
1667 memcpy(buf, dst, dlen);
1668 zpool_unmap_handle(zpool, handle);
1669 mutex_unlock(&acomp_ctx->mutex);
1671 /* populate entry */
1672 entry->swpentry = swp_entry(type, offset);
1673 entry->handle = handle;
1674 entry->length = dlen;
1677 entry->objcg = objcg;
1679 obj_cgroup_charge_zswap(objcg, entry->length);
1680 /* Account before objcg ref is moved to tree */
1681 count_objcg_event(objcg, ZSWPOUT);
1685 spin_lock(&tree->lock);
1687 * A duplicate entry should have been removed at the beginning of this
1688 * function. Since the swap entry should be pinned, if a duplicate is
1689 * found again here it means that something went wrong in the swap
1690 * cache.
1691 */
1692 while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
1694 zswap_duplicate_entry++;
1695 zswap_invalidate_entry(tree, dupentry);
1697 if (entry->length) {
1698 INIT_LIST_HEAD(&entry->lru);
1699 zswap_lru_add(&entry->pool->list_lru, entry);
1700 atomic_inc(&entry->pool->nr_stored);
1702 spin_unlock(&tree->lock);
1705 atomic_inc(&zswap_stored_pages);
1706 zswap_update_total_size();
1707 count_vm_event(ZSWPOUT);
1712 mutex_unlock(&acomp_ctx->mutex);
1714 zswap_pool_put(entry->pool);
1716 zswap_entry_cache_free(entry);
1719 obj_cgroup_put(objcg);
1723 pool = zswap_pool_last_get();
1724 if (pool && !queue_work(shrink_wq, &pool->shrink_work))
1725 zswap_pool_put(pool);
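/* Load path: look up the entry for the faulting swap slot, then either
* decompress it or re-fill the same-value page; with exclusive loads the
* entry is invalidated afterwards.
*/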
1729 bool zswap_load(struct folio *folio)
1731 swp_entry_t swp = folio->swap;
1732 int type = swp_type(swp);
1733 pgoff_t offset = swp_offset(swp);
1734 struct page *page = &folio->page;
1735 struct zswap_tree *tree = zswap_trees[type];
1736 struct zswap_entry *entry;
1739 VM_WARN_ON_ONCE(!folio_test_locked(folio));
1742 spin_lock(&tree->lock);
1743 entry = zswap_entry_find_get(&tree->rbroot, offset);
1745 spin_unlock(&tree->lock);
1748 spin_unlock(&tree->lock);
1751 __zswap_load(entry, page);
1753 dst = kmap_local_page(page);
1754 zswap_fill_page(dst, entry->value);
1758 count_vm_event(ZSWPIN);
1760 count_objcg_event(entry->objcg, ZSWPIN);
1762 spin_lock(&tree->lock);
1763 if (zswap_exclusive_loads_enabled) {
1764 zswap_invalidate_entry(tree, entry);
1765 folio_mark_dirty(folio);
1766 } else if (entry->length) {
1767 zswap_lru_del(&entry->pool->list_lru, entry);
1768 zswap_lru_add(&entry->pool->list_lru, entry);
1770 zswap_entry_put(tree, entry);
1771 spin_unlock(&tree->lock);
1776 void zswap_invalidate(int type, pgoff_t offset)
1778 struct zswap_tree *tree = zswap_trees[type];
1779 struct zswap_entry *entry;
1782 spin_lock(&tree->lock);
1783 entry = zswap_rb_search(&tree->rbroot, offset);
1785 /* entry was written back */
1786 spin_unlock(&tree->lock);
1789 zswap_invalidate_entry(tree, entry);
1790 spin_unlock(&tree->lock);
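/* Called when a swap device is enabled: allocate the per-swap-type tree. */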
1793 void zswap_swapon(int type)
1795 struct zswap_tree *tree;
1797 tree = kzalloc(sizeof(*tree), GFP_KERNEL);
1799 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1803 tree->rbroot = RB_ROOT;
1804 spin_lock_init(&tree->lock);
1805 zswap_trees[type] = tree;
1808 void zswap_swapoff(int type)
1810 struct zswap_tree *tree = zswap_trees[type];
1811 struct zswap_entry *entry, *n;
1816 /* walk the tree and free everything */
1817 spin_lock(&tree->lock);
1818 rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
1819 zswap_free_entry(entry);
1820 tree->rbroot = RB_ROOT;
1821 spin_unlock(&tree->lock);
1823 zswap_trees[type] = NULL;
1826 /*********************************
1828 **********************************/
1829 #ifdef CONFIG_DEBUG_FS
1830 #include <linux/debugfs.h>
1832 static struct dentry *zswap_debugfs_root;
1834 static int zswap_debugfs_init(void)
1836 if (!debugfs_initialized())
1839 zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1841 debugfs_create_u64("pool_limit_hit", 0444,
1842 zswap_debugfs_root, &zswap_pool_limit_hit);
1843 debugfs_create_u64("reject_reclaim_fail", 0444,
1844 zswap_debugfs_root, &zswap_reject_reclaim_fail);
1845 debugfs_create_u64("reject_alloc_fail", 0444,
1846 zswap_debugfs_root, &zswap_reject_alloc_fail);
1847 debugfs_create_u64("reject_kmemcache_fail", 0444,
1848 zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1849 debugfs_create_u64("reject_compress_fail", 0444,
1850 zswap_debugfs_root, &zswap_reject_compress_fail);
1851 debugfs_create_u64("reject_compress_poor", 0444,
1852 zswap_debugfs_root, &zswap_reject_compress_poor);
1853 debugfs_create_u64("written_back_pages", 0444,
1854 zswap_debugfs_root, &zswap_written_back_pages);
1855 debugfs_create_u64("duplicate_entry", 0444,
1856 zswap_debugfs_root, &zswap_duplicate_entry);
1857 debugfs_create_u64("pool_total_size", 0444,
1858 zswap_debugfs_root, &zswap_pool_total_size);
1859 debugfs_create_atomic_t("stored_pages", 0444,
1860 zswap_debugfs_root, &zswap_stored_pages);
1861 debugfs_create_atomic_t("same_filled_pages", 0444,
1862 zswap_debugfs_root, &zswap_same_filled_pages);
1867 static int zswap_debugfs_init(void)
1873 /*********************************
1874 * module init and exit
1875 **********************************/
1876 static int zswap_setup(void)
1878 struct zswap_pool *pool;
1881 zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1882 if (!zswap_entry_cache) {
1883 pr_err("entry cache creation failed\n");
1887 ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1888 "mm/zswap_pool:prepare",
1889 zswap_cpu_comp_prepare,
1890 zswap_cpu_comp_dead);
1894 pool = __zswap_pool_create_fallback();
1896 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1897 zpool_get_type(pool->zpools[0]));
1898 list_add(&pool->list, &zswap_pools);
1899 zswap_has_pool = true;
1901 pr_err("pool creation failed\n");
1902 zswap_enabled = false;
1905 shrink_wq = create_workqueue("zswap-shrink");
1909 if (zswap_debugfs_init())
1910 pr_warn("debugfs initialization failed\n");
1911 zswap_init_state = ZSWAP_INIT_SUCCEED;
1916 zswap_pool_destroy(pool);
1918 kmem_cache_destroy(zswap_entry_cache);
1920 /* if built-in, we aren't unloaded on failure; don't allow use */
1921 zswap_init_state = ZSWAP_INIT_FAILED;
1922 zswap_enabled = false;
1926 static int __init zswap_init(void)
1930 return zswap_setup();
1932 /* must be late so crypto has time to come up */
1933 late_initcall(zswap_init);
1935 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1936 MODULE_DESCRIPTION("Compressed cache for swap pages");