GNU Linux-libre 4.9.297-gnu1
mm/swap_state.c
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
        [0 ... MAX_SWAPFILES - 1] = {
                .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
                .i_mmap_writable = ATOMIC_INIT(0),
                .a_ops          = &swap_aops,
                /* swap cache doesn't use writeback related tags */
                .flags          = 1 << AS_NO_WRITEBACK_TAGS,
        }
};

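/*
 * Each swap type gets its own address_space in the array above;
 * swap_address_space() (a helper in <linux/swap.h>) maps a swp_entry_t
 * to the slot for its type, roughly:
 *
 *      struct address_space *space = &swapper_spaces[swp_type(entry)];
 *
 * which is why the radix-tree lookups below are keyed by swp_offset()
 * alone.
 */
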
#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
        int i;
        unsigned long ret = 0;

        for (i = 0; i < MAX_SWAPFILES; i++)
                ret += swapper_spaces[i].nrpages;
        return ret;
}

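/*
 * swapin_readahead_hits tracks how useful recent readahead has been:
 * lookup_swap_cache() bumps it whenever a PageReadahead page is found,
 * and swapin_nr_pages() below drains it to size the next window.
 */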
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        get_page(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        error = radix_tree_insert(&address_space->page_tree,
                                  swp_offset(entry), page);
        if (likely(!error)) {
                address_space->nrpages++;
                __inc_node_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&address_space->tree_lock);

        if (unlikely(error)) {
                /*
                 * Only a context which has set the SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache().
                 * So add_to_swap_cache() doesn't return -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                put_page(page);
        }

        return error;
}

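/*
 * add_to_swap_cache - wrapper that preloads the radix tree before
 * __add_to_swap_cache() takes tree_lock, so that (gfp_mask permitting)
 * the insertion itself does not have to allocate memory.
 */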
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        radix_tree_delete(&address_space->page_tree, swp_offset(entry));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        address_space->nrpages--;
        __dec_node_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to which subpages are added if a huge page must be split
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 on failure.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (mem_cgroup_try_charge_swap(page, entry)) {
                swapcache_free(entry);
                return 0;
        }

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
                return 0;
        }
}

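/*
 * Example (a sketch, not verbatim): the reclaim-side caller in
 * shrink_page_list() uses add_to_swap() roughly like
 *
 *      if (PageAnon(page) && !PageSwapCache(page)) {
 *              if (!add_to_swap(page, page_list))
 *                      goto activate_locked;
 *      }
 *
 * with the page locked throughout, as required above.
 */
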
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * as the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        swapcache_free(entry);
        put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

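/*
 * Note on the is_huge_zero_page() check above: the huge zero page's
 * lifetime is managed separately (its references are counted by the
 * huge_zero_page machinery and it is torn down by a shrinker), so a
 * reference must not be dropped through the ordinary put_page() here.
 */
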
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr, false);
}

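/*
 * A typical caller (a sketch, not verbatim) is the mmu_gather code,
 * releasing each batch of pages once the page-table entries pointing
 * at them have been torn down and the TLB flushed:
 *
 *      free_pages_and_swap_cache(batch->pages, batch->nr);
 *
 * see tlb_flush_mmu_free() in mm/memory.c.
 */
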
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), swp_offset(entry));

        if (page) {
                INC_CACHE_INFO(find_success);
                if (TestClearPageReadahead(page))
                        atomic_inc(&swapin_readahead_hits);
        }

        INC_CACHE_INFO(find_total);
        return page;
}

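/*
 * Example (a sketch of the fault path, not verbatim): do_swap_page()
 * pairs this lookup with readahead roughly as
 *
 *      page = lookup_swap_cache(entry);
 *      if (!page)
 *              page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *                                      vma, address);
 *
 * so a readahead hit recorded here feeds back into swapin_nr_pages().
 */
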
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct page *found_page, *new_page = NULL;
        struct address_space *swapper_space = swap_address_space(entry);
        int err;
        *new_page_allocated = false;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swapper_space, swp_offset(entry));
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet, while
                         * the other end is scheduled away waiting on discard
                         * I/O completion at scan_swap_map().
                         *
                         * In order to avoid turning this transitory state
                         * into a permanent loop around this -EEXIST case
                         * if !CONFIG_PREEMPT and the I/O completion happens
                         * to be waiting on the CPU waitqueue where we are now
                         * busy looping, we just conditionally invoke the
                         * scheduler here, if there are some more important
                         * tasks to run.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __SetPageLocked(new_page);
                __SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        *new_page_allocated = true;
                        return new_page;
                }
                radix_tree_preload_end();
                __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                put_page(new_page);
        return found_page;
}

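/*
 * The retry loop above, in outline (a reading aid, not new behaviour):
 *
 *      1. entry already in swap cache          -> return found_page
 *      2. swapcache_prepare() == -EEXIST       -> another task is adding
 *                                                 it; cond_resched(), retry
 *      3. swapcache_prepare() other error      -> entry was freed; give up
 *      4. __add_to_swap_cache() succeeds       -> return locked new_page
 *      5. __add_to_swap_cache() == -ENOMEM     -> give up
 */
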
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage);

        return retpage;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int pages, max_pages, last_ra;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
                prev_offset = offset;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = atomic_read(&last_readahead_pages) / 2;
        if (pages < last_ra)
                pages = last_ra;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

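/*
 * Worked example (assuming page_cluster == 3, so max_pages == 8): with
 * 5 readahead hits since the last fault, pages = 5 + 2 = 7, rounded up
 * to the next power of two, 8, which is within max_pages.  With no hits
 * and a non-adjacent offset, pages collapses to 1, subject to the
 * "don't shrink too fast" floor of half the previous window.
 */
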
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct blk_plug plug;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        continue;
                if (offset != entry_offset)
                        SetPageReadahead(page);
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
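
/*
 * Worked example (hypothetical numbers): with swapin_nr_pages() == 8,
 * mask == 7; a fault at offset 0x123 reads the aligned cluster of
 * offsets 0x120..0x127, and every page except 0x123 itself is tagged
 * PageReadahead so that a later hit in lookup_swap_cache() can be
 * credited back to the swapin_nr_pages() heuristic.
 */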