// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 */

#define pr_fmt(fmt) "PM: " fmt
#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"
#define HIBERNATE_SIG	"S1SUSPEND"

u32 swsusp_hardware_signature;
/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages cleaning before they
 * can be executed. We don't know which pages these may be, so clean the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;
/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we pick up all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
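
/*
 * Note: the "- 1" above reserves one slot of the page for the .next_swap
 * link, so a swap_map_page fills exactly one page. With 4 KiB pages and
 * an 8-byte sector_t, for example, that is 511 entries plus the link.
 */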
/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}
struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};
/*
 * The swap_map_handle structure is used for handling swap in
 * a file-alike way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};
struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
		      sizeof(u32) - sizeof(u32)];
	u32	hw_sig;
	u32	crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;

static struct swsusp_header *swsusp_header;
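
/*
 * The reserved[] padding sizes this structure to exactly one page, so that
 * sig[] lands in the last bytes of the swap header page, overlaying the
 * on-disk swap signature ("SWAP-SPACE"/"SWAPSPACE2") that normally lives
 * there; orig_sig preserves whatever signature gets overwritten.
 */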
/*
 * The following functions are used for tracing the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;
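
/*
 * Allocated swap pages are tracked as [start, end] extents rather than
 * individually, so contiguous runs of offsets collapse into single tree
 * nodes and even a large image needs only a handful of them.
 */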
static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start = swap_offset;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end = swap_offset;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}
/*
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}
/*
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = rb_entry(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}
/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct bdev_handle *hib_resume_bdev_handle;

struct hib_bio_batch {
	atomic_t		count;
	wait_queue_head_t	wait;
	blk_status_t		error;
	struct blk_plug		plug;
};
static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
	blk_start_plug(&hb->plug);
}

static void hib_finish_batch(struct hib_bio_batch *hb)
{
	blk_finish_plug(&hb->plug);
}
static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}
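
/*
 * Submit I/O for one page at @page_off (in PAGE_SIZE units) on the resume
 * device. With a batch (@hb != NULL) the bio completes asynchronously via
 * hib_end_io() and the caller must collect the result with hib_wait_io();
 * without one, the bio is submitted and waited for synchronously.
 */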
static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
			 struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(hib_resume_bdev_handle->bdev, 1, opf,
			GFP_NOIO | __GFP_HIGH);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}

	return error;
}
static int hib_wait_io(struct hib_bio_batch *hb)
{
	/*
	 * We are relying on the behavior of blk_plug that a thread with
	 * a plug will flush the plug list before sleeping.
	 */
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}
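
/*
 * mark_swapfiles - write the hibernate signature into the swap header.
 *
 * Replaces the on-disk swap signature ("SWAP-SPACE"/"SWAPSPACE2") with
 * HIBERNATE_SIG and records where the image starts; the original
 * signature is kept in orig_sig so it can be restored on resume.
 */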
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		if (swsusp_hardware_signature) {
			swsusp_header->hw_sig = swsusp_hardware_signature;
			flags |= SF_HW_SIG;
		}
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		pr_err("Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}
/*
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving image
 */
static int swsusp_swap_check(void)
{
	int res;

	if (swsusp_resume_device)
		res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
	else
		res = find_first_swap(&swsusp_resume_device);
	if (res < 0)
		return res;
	root_swap = res;

	hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
			BLK_OPEN_WRITE, NULL, NULL);
	if (IS_ERR(hib_resume_bdev_handle))
		return PTR_ERR(hib_resume_bdev_handle);

	res = set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
	if (res < 0)
		bdev_release(hib_resume_bdev_handle);

	return res;
}
/**
 * write_page - Write one page to given swap location.
 * @buf: Address we're writing.
 * @offset: Offset of the swap page we're writing to.
 * @hb: bio completion batch
 */

static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (hb) {
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
					      __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
						      __GFP_NOWARN |
						      __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
}
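
/*
 * The copy above is what makes asynchronous batching safe: the caller may
 * reuse @buf as soon as write_page() returns, so the data is snapshotted
 * into a private page first. If no page can be allocated even after
 * waiting for in-flight I/O to complete, the write falls back to a
 * synchronous submission directly from @buf.
 */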
static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			pr_err("Cannot find swap device, try swapon -a\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close();
	return ret;
}
static int swap_write_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	int error;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}
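
/*
 * Each time a map page fills up, the block for the *next* map page is
 * allocated first so that .next_swap can be filled in before the current
 * map page is flushed to its pre-allocated location; on resume the reader
 * walks this chain starting from the sector stored in the swap header.
 */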
static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}
static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		pr_info("S");
		error = mark_swapfiles(handle, flags);
		pr_cont("|\n");
		flush_swap_writer(handle);
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close();

	return error;
}
/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192
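
/*
 * For illustration: with 4 KiB pages, LZO_UNC_SIZE is 128 KiB, and with
 * the kernel's lzo1x_worst_compress(x) bound of x + x/16 + 64 + 3 bytes,
 * LZO_CMP_PAGES works out to 35 pages per compressed chunk (on a 64-bit
 * build, where LZO_HEADER is 8 bytes).
 */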
/**
 * save_image - save the suspend image data
 */
static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}
/*
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	unsigned run_threads;                     /* nr current threads */
	wait_queue_head_t go;                     /* start crc update */
	wait_queue_head_t done;                   /* crc update done */
	u32 *crc32;                               /* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};
/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
			                     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
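
/*
 * The ready/stop flags and the go/done wait queues form a simple handshake
 * shared by the CRC32, compression and decompression threads: the producer
 * fills the buffers, sets ->ready and wakes ->go; the worker clears
 * ->ready, does its work, sets ->stop and wakes ->done.
 */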
/*
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start compression */
	wait_queue_head_t done;                   /* compression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};
/*
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
		                          d->cmp + LZO_HEADER, &d->cmp_len,
		                          d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);
	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
		                            &data[thr],
		                            "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();
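
	/*
	 * Main loop: slice the snapshot into LZO_UNC_SIZE chunks, hand one
	 * chunk to each compression thread, let the CRC32 thread checksum
	 * the same uncompressed buffers, then write each compressed chunk,
	 * prefixed with its length, out one page at a time.
	 */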
	pr_info("Using %u thread(s) for compression\n", nr_threads);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image saving progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(data[thr].unc_len))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	hib_finish_batch(&hb);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page) free_page((unsigned long)page);

	return ret;
}
/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */
static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}
/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we do NOT want to mark
 * the filesystem clean: it is not. (And it does not matter, if we resume
 * correctly, we'll mark the system clean, anyway.)
 */

int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		pr_err("Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages)) {
			pr_err("Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < (int)PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}
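
/*
 * Resulting layout: swsusp_header (at swsusp_resume_block) points at the
 * first swap map page; the first map entry is the swsusp_info header page
 * written above and the remaining entries are the image data pages.
 */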
/*
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}
static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(GFP_NOIO | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}
static int swap_read_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}
/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_pages pages to load)
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image loading progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}
/*
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start decompression */
	wait_queue_head_t done;                   /* decompression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};
/*
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
		                               d->unc, &d->unc_len);

		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
			                   (unsigned long)d->unc + d->unc_len);

		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
/**
 * load_image_lzo - Load compressed image data and decompress them with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
	         have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);
	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
		                            &data[thr],
		                            "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
						  GFP_NOIO | __GFP_HIGH :
						  GFP_NOIO | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				pr_err("Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;
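
	/*
	 * page[] now holds a ring of ring_size compressed-page buffers. The
	 * first LZO_CMP_PAGES of them are mandatory (enough for one worst
	 * case chunk); anything beyond that is opportunistic read-ahead.
	 */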
	pr_info("Using %u thread(s) for decompression\n", nr_threads);
	pr_info("Loading and decompressing image data (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
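
	/*
	 * Main pipeline: keep the ring full of read-ahead compressed pages,
	 * hand complete chunks to the decompression threads, checksum the
	 * uncompressed output in the CRC32 thread, and copy it into the
	 * snapshot, overlapping I/O with decompression throughout.
	 */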
	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

	for (;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;
		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}
		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
			                    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}
		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
			             data[thr].unc_len > LZO_UNC_SIZE ||
			             data[thr].unc_len & (PAGE_SIZE - 1))) {
				pr_err("Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image loading progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}
out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					pr_err("Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	hib_finish_batch(&hb);
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}
/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header should
 *	     be written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < (int)PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	if (!error)
		error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("Image successfully loaded\n");
	else
		pr_debug("Error %d resuming\n", error);
	return error;
}
static void *swsusp_holder;

/**
 * swsusp_check - Open the resume device and check for the swsusp signature.
 * @exclusive: Open the resume device exclusively.
 */

int swsusp_check(bool exclusive)
{
	void *holder = exclusive ? &swsusp_holder : NULL;
	int error;

	hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
				BLK_OPEN_READ, holder, NULL);
	if (!IS_ERR(hib_resume_bdev_handle)) {
		set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
						swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}
		if (!error && swsusp_header->flags & SF_HW_SIG &&
		    swsusp_header->hw_sig != swsusp_hardware_signature) {
			pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
				swsusp_header->hw_sig, swsusp_hardware_signature);
			error = -EINVAL;
		}

put:
		if (error)
			bdev_release(hib_resume_bdev_handle);
		else
			pr_debug("Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev_handle);
	}

	if (error)
		pr_debug("Image not found (code %d)\n", error);

	return error;
}
/**
 * swsusp_close - close resume device.
 */

void swsusp_close(void)
{
	if (IS_ERR(hib_resume_bdev_handle)) {
		pr_debug("Image device not initialised\n");
		return;
	}

	bdev_release(hib_resume_bdev_handle);
}
/**
 * swsusp_unmark - Unmark swsusp signature in the resume device
 */

#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(REQ_OP_READ, swsusp_resume_block,
			swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
					swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		pr_err("Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend, we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif
static int __init swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);