/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */

#define pr_fmt(fmt) "PM: " fmt
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "power.h"
#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}
/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;
/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;
/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}
static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
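/*
 * Illustrative sketch (not part of the original source): the usual life
 * cycle of the chain allocator, as used by memory_bm_create() below, is
 *
 *	struct chain_allocator ca;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	obj = chain_alloc(&ca, sizeof(*obj));	// carved out of the chain
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);	// frees everything
 *
 * Individual objects are never freed; the whole chain goes at once.
 */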
/*
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects. The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone. For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent the blocks of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits. Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */
#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
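/*
 * Worked example (illustration only): with 4 KiB pages, each bitmap block
 * covers PAGE_SIZE * BITS_PER_BYTE = 4096 * 8 = 32768 PFNs, i.e. 128 MiB
 * of memory, and BM_BLOCK_SHIFT is 12 + 3 = 15, so "pfn >> BM_BLOCK_SHIFT"
 * selects the block and "pfn & BM_BLOCK_MASK" the bit within it.
 */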
/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;		/* most recently used bit position */
};
/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
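/*
 * Worked example (illustration only): on a 64-bit machine with 4 KiB pages,
 * BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512, so BM_RTREE_LEVEL_SHIFT = 12 - 3 = 9
 * and each radix-tree level consumes 9 bits of the block number. A single
 * leaf then covers 128 MiB of memory, and one inner level (512 leaves)
 * covers 64 GiB.
 */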
/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}
/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
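/*
 * Example of the level computation above (illustration only): for
 * block_nr == 600 with BM_RTREE_LEVEL_SHIFT == 9, the while loop sees 600
 * and then 600 >> 9 == 1, so levels_needed == 2: one inner node whose slot
 * 600 >> 9 == 1 points at the leaf, which is selected by 600 & 511 == 88.
 */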
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}
/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}
/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}
/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}
/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 *
	 * If the zone we wish to scan is the current zone and the
	 * pfn falls into the current node then we do not need to walk
	 * the tree.
	 */
	node = bm->cur.node;
	if (zone == bm->cur.zone &&
	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
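/*
 * Worked lookup for memory_bm_find_bit() (illustration only): for
 * pfn - zone->start_pfn == 0x12345 with the 64-bit constants above, the
 * leaf block is 0x12345 >> 15 == 2, reached by indexing each tree level
 * with 9-bit chunks of that block number, and the bit within the leaf
 * page is 0x12345 & 0x7fff.
 */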
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}
/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}
/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it. If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
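/*
 * Sketch of the canonical iteration over a bitmap, as used throughout
 * this file (illustration only):
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something(pfn);	// hypothetical per-PFN action
 */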
/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}
static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}
/**
 * __register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */
void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}
/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}
/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames. The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}
/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}
void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}
/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
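/*
 * Worked example (illustration only): for a zone spanning 4 GiB (1048576
 * pages) with 4 KiB pages, rtree = nodes = DIV_ROUND_UP(1048576, 32768) = 32
 * leaves; the 32 rtree_node structs fit in one linked page, and the while
 * loop adds a single inner node (DIV_ROUND_UP(32, 512) == 1), giving an
 * estimate of 2 * (32 + 1 + 1) = 68 pages, well under 0.01% of the zone.
 */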
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */
/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}
/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC is not set,
 * in which case kernel_page_present() always returns 'true'.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}
#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */
static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for(;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;
/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}
/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}
#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */
/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}
/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}
/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size), respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it. First, make room for the largest possible
	 * image and fail if that doesn't work. Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more. Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_cont("done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	pr_cont("\n");
	swsusp_free();
	return -ENOMEM;
}
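/*
 * Worked example of the max_size computation above (illustration only):
 * with count = 1000000 usable page frames, size = 2000 metadata pages,
 * PAGES_FOR_IO = 1024 and reserved_size = 1 MiB (256 pages at 4 KiB),
 * max_size = (1000000 - (2000 + 1024)) / 2 - 2 * 256 = 497976 pages,
 * i.e. roughly half of RAM may be left saveable in the worst case.
 */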
#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}
#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system. If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);

	return 0;
}
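/*
 * Sizing note (illustration only): each meta page holds PAGE_SIZE /
 * sizeof(long) PFNs, i.e. 512 on a 64-bit machine with 4 KiB pages, so an
 * image of 100000 copied pages needs DIV_ROUND_UP(100000, 512) = 196 meta
 * pages on top of the data pages and the one-page header.
 */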
#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}
/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}
/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number. Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors. If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}
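/*
 * Sketch of the read loop a caller is expected to run (illustration only,
 * simplified from the swap writer in kernel/power/swap.c):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle)) > 0)
 *		write_page_somewhere(data_of(handle));	// hypothetical sink
 *	// ret == 0: end of image; ret < 0: error
 */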
static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}
static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}
#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;
/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages). The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that snapshot_write_next()
 * should return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of snapshot_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to snapshot_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image page from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}
static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
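/*
 * Worked example (illustrative): with 4 KiB pages and 8-byte pointers,
 * LINKED_PAGE_DATA_SIZE is 4096 - 8 == 4088 bytes and sizeof(struct pbe)
 * is 3 * 8 == 24 bytes, so a single linked page can hold
 * 4088 / 24 == 170 PBEs.
 */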
/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet.  Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later.  On systems with high
 * memory a list of "safe" highmem pages is created too.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
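/*
 * Numeric sketch (illustrative figures, not measured): with
 * nr_copy_pages == 100000, nr_highmem == 0 and
 * allocated_unsafe_pages == 20000, the loops above reserve
 * DIV_ROUND_UP(80000, 170) == 471 safe pages for future chain_alloc()
 * calls and then preallocate 80000 more pages; only the "safe" subset of
 * the latter goes onto safe_pages_list, while the "unsafe" ones are merely
 * kept allocated so they cannot be handed out again before the atomic
 * restore overwrites them.
 */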
/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
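/*
 * Conceptual sketch (the code lives in each architecture, not in this file):
 * the restore_pblist built above is consumed at the very end of resume by
 * swsusp_arch_resume(), which copies every deferred page into place along
 * the lines of:
 *
 *	struct pbe *pbe;
 *
 *	for (pbe = restore_pblist; pbe; pbe = pbe->next)
 *		copy_page(pbe->orig_address, pbe->address);
 */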
/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the writing.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function on each subsequent call.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition.  Negative
 * numbers are returned on errors, in which case the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}
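/*
 * Usage sketch: a reader such as load_image() in kernel/power/swap.c drives
 * this function roughly as follows (hib_read_page() is a stand-in for the
 * real swap I/O helpers, not an actual API; error handling trimmed):
 *
 *	int ret;
 *
 *	for (;;) {
 *		ret = snapshot_write_next(snapshot);
 *		if (ret <= 0)
 *			break;
 *		ret = hib_read_page(handle, data_of(*snapshot));
 *		if (ret)
 *			break;
 *	}
 *
 * A return value of 0 from snapshot_write_next() means the whole image has
 * been loaded; a negative value is an error.
 */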
/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem.  Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}
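/*
 * Typical end-of-load sequence in the caller (paraphrased from
 * kernel/power/swap.c):
 *
 *	snapshot_write_finalize(snapshot);
 *	if (!snapshot_image_loaded(snapshot))
 *		ret = -ENODATA;
 */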
#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}
/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (i.e. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */