/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};
struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
bool swap_vma_readahead = true;

#define SWAP_RA_MAX_ORDER_DEFAULT	3

static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT;
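/*
 * Per-VMA readahead state is packed into a single unsigned long
 * (vma->swap_readahead_info): the page-aligned fault address lives in
 * the PAGE_MASK bits, while the low bits hold the readahead window
 * size and the hit count, extracted with the SWAP_RA_* macros below.
 */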
#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;
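/*
 * Sum the nrpages counts of every per-type swap address space.  The
 * walk runs under rcu_read_lock() because the swapper_spaces[] entries
 * are published and torn down with RCU in init/exit_swap_address_space().
 */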
unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;

	rcu_read_lock();
	for (i = 0; i < MAX_SWAPFILES; i++) {
		/*
		 * The corresponding entries in nr_swapper_spaces and
		 * swapper_spaces will be reused only after at least
		 * one grace period.  So it is impossible for them
		 * to belong to different usages.
		 */
		nr = nr_swapper_spaces[i];
		spaces = rcu_dereference(swapper_spaces[i]);
		if (!nr || !spaces)
			continue;
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
	}
	rcu_read_unlock();
	return ret;
}
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error, i, nr = hpage_nr_pages(page);
	struct address_space *address_space;
	pgoff_t idx = swp_offset(entry);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	for (i = 0; i < nr; i++) {
		set_page_private(page + i, entry.val + i);
		error = radix_tree_insert(&address_space->page_tree,
					  idx + i, page + i);
		if (unlikely(error))
			break;
	}
	if (likely(!error)) {
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
	} else {
		/*
		 * Only the context which has set SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page + i, 0UL);
		while (i--) {
			radix_tree_delete(&address_space->page_tree, idx + i);
			set_page_private(page + i, 0UL);
		}
		ClearPageSwapCache(page);
		page_ref_sub(page, nr);
	}
	spin_unlock_irq(&address_space->tree_lock);

	return error;
}
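/*
 * Preload enough radix tree nodes for a (possibly huge) page and then
 * insert it into the swap cache under the tree lock.
 */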
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	struct address_space *address_space;
	int i, nr = hpage_nr_pages(page);
	swp_entry_t entry;
	pgoff_t idx;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	idx = swp_offset(entry);
	for (i = 0; i < nr; i++) {
		radix_tree_delete(&address_space->page_tree, idx + i);
		set_page_private(page + i, 0);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry))
		goto fail;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
	/* -ENOMEM radix-tree allocation failure */
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty. A special case is MADV_FREE page. The page's pte could have
	 * dirty bit cleared but the page's SwapBacked bit is still set because
	 * clearing the dirty bit and SwapBacked bit has no lock protecting it.
	 * For such a page, unmap will not set the dirty bit, so page reclaim
	 * will not write the page out. This can cause data corruption when the
	 * page is swapped in later. Always setting the dirty bit for the page
	 * solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	unsigned long ra_info;
	int win, hits, readahead;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	INC_CACHE_INFO(find_total);
	if (page) {
		INC_CACHE_INFO(find_success);
		if (unlikely(PageTransCompound(page)))
			return page;
		readahead = TestClearPageReadahead(page);
		if (vma) {
			ra_info = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_info);
			hits = SWAP_RA_HITS(ra_info);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}
		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma)
				atomic_inc(&swapin_readahead_hits);
		}
	}
	return page;
}
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		put_swap_page(new_page, entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}
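/*
 * Decide the size of the next readahead window from the recent hit
 * count and the previous window, clamped to max_pages.
 */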
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits, int max_pages, int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}
	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
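/*
 * Work out the current readahead window from the global hit counter
 * and the previously used window, remembering both for next time.
 */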
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		prev_offset = offset;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;
	bool do_poll = true, page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
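/*
 * Carve the swap device into SWAP_ADDRESS_SPACE_PAGES sized chunks,
 * each with its own address_space (and thus its own tree_lock).
 */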
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
		spin_lock_init(&space->tree_lock);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}
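/*
 * Unpublish the swapper spaces for a swap type and free them once all
 * RCU readers (e.g. total_swapcache_pages()) are done with them.
 */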
void exit_swap_address_space(unsigned int type)
{
	struct address_space *spaces;

	spaces = swapper_spaces[type];
	nr_swapper_spaces[type] = 0;
	rcu_assign_pointer(swapper_spaces[type], NULL);
	synchronize_rcu();
	kvfree(spaces);
}
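/*
 * Clamp a readahead pfn range to the VMA and to the PMD-sized page
 * table that contains the faulting address.
 */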
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
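/*
 * Look the faulting entry up in the swap cache and, on a miss, compute
 * the VMA based readahead window and snapshot the surrounding PTEs for
 * do_swap_page_readahead().
 */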
struct page *swap_readahead_detect(struct vm_fault *vmf,
				   struct vma_swap_readahead *swap_ra)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long swap_ra_info;
	struct page *page;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	faddr = vmf->address;
	entry = pte_to_swp_entry(vmf->orig_pte);
	if ((unlikely(non_swap_entry(entry))))
		return NULL;
	page = lookup_swap_cache(entry, vma, faddr);
	if (page)
		return page;

	max_win = 1 << READ_ONCE(swap_ra_max_order);
	if (max_win == 1) {
		swap_ra->win = 1;
		return NULL;
	}

	fpfn = PFN_DOWN(faddr);
	swap_ra_info = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
	prev_win = SWAP_RA_WIN(swap_ra_info);
	hits = SWAP_RA_HITS(swap_ra_info);
	swap_ra->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1)
		return NULL;

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	swap_ra->nr_pte = end - start;
	swap_ra->offset = fpfn - start;
	pte = vmf->pte - swap_ra->offset;
#ifdef CONFIG_64BIT
	swap_ra->ptes = pte;
#else
	tpte = swap_ra->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif

	return NULL;
}
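/*
 * Issue asynchronous swap-ins for the swap entries found in the PTE
 * window captured by swap_readahead_detect(), then read the faulting
 * entry itself.
 */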
struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				    struct vm_fault *vmf,
				    struct vma_swap_readahead *swap_ra)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;

	if (swap_ra->win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = swap_ra->ptes; i < swap_ra->nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != swap_ra->offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     swap_ra->win == 1);
}
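/*
 * Sysfs knobs for VMA based swap readahead; the attributes are created
 * under the "swap" kobject added below, a child of mm_kobj
 * (/sys/kernel/mm/swap).
 */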
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", swap_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		swap_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		swap_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);
static ssize_t vma_ra_max_order_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", swap_ra_max_order);
}
static ssize_t vma_ra_max_order_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	int err, v;

	err = kstrtoint(buf, 10, &v);
	if (err || v > SWAP_RA_ORDER_CEILING || v <= 0)
		return -EINVAL;

	swap_ra_max_order = v;

	return count;
}
static struct kobj_attribute vma_ra_max_order_attr =
	__ATTR(vma_ra_max_order, 0644, vma_ra_max_order_show,
	       vma_ra_max_order_store);
static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	&vma_ra_max_order_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif