// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};
struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;
#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)
#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))
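/*
 * Worked example (illustrative, not from the original source), assuming
 * 4KB pages (PAGE_SHIFT == 12): SWAP_RA_WIN_SHIFT is 6, so the hit count
 * lives in bits 0-5, the readahead window in bits 6-11, and the
 * page-aligned fault address in bits 12 and up of a single atomic_long
 * stored in the vma.  For instance,
 *
 *      SWAP_RA_VAL(0x7f1234567000UL, 8, 3) == 0x7f1234567203UL
 *
 * which decodes back as SWAP_RA_ADDR() == 0x7f1234567000,
 * SWAP_RA_WIN() == 8 and SWAP_RA_HITS() == 3.
 */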
/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
#define INC_CACHE_INFO(x)       data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)   data_race(swap_cache_info.x += (nr))

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
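/*
 * Illustrative output only (the numbers are made up, the format follows
 * the printk strings above):
 *
 *      12345 pages in swap cache
 *      Swap cache stats: add 100000, delete 90000, find 45000/60000
 *      Free swap  = 4000000kB
 *      Total swap = 8388604kB
 */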
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        struct page *page;

        page = find_get_entry(address_space, idx);
        if (xa_is_value(page))
                return page;
        if (page)
                put_page(page);
        return NULL;
}
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
        unsigned long i, nr = thp_nr_pages(page);
        void *old;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_ref_add(page, nr);
        SetPageSwapCache(page);

        do {
                unsigned long nr_shadows = 0;

                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
                        old = xas_load(&xas);
                        if (xa_is_value(old)) {
                                nr_shadows++;
                                if (shadowp)
                                        *shadowp = old;
                        }
                        set_page_private(page + i, entry.val + i);
                        xas_store(&xas, page);
                        xas_next(&xas);
                }
                address_space->nrexceptional -= nr_shadows;
                address_space->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                __mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
                ADD_CACHE_INFO(add_total, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        ClearPageSwapCache(page);
        page_ref_sub(page, nr);
        return xas_error(&xas);
}
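/*
 * Example (illustrative, not part of the original source): caching a 2MB
 * THP (nr == 512) against swap entry E stores the same compound page in
 * 512 consecutive XArray slots of the swap address space and sets
 * page_private(page + i) = E.val + i for i = 0..511, so each subpage can
 * later find its own swap slot.  A plain 4KB page is simply the nr == 1
 * case.
 */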
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
                        swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i, nr = thp_nr_pages(page);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != page, entry);
                set_page_private(page + i, 0);
                xas_next(&xas);
        }
        ClearPageSwapCache(page);
        if (shadow)
                address_space->nrexceptional += nr;
        address_space->nrpages -= nr;
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        __mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
        ADD_CACHE_INFO(del_total, nr);
}
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page(page);
        if (!entry.val)
                return 0;

        /*
         * XArray node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         *
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the page will be dirtied in unmap because its pte should be
         * dirty. A special case is MADV_FREE page. The page's pte could have
         * dirty bit cleared but the page's SwapBacked bit is still set because
         * clearing the dirty bit and SwapBacked bit has no lock protected. For
         * such page, unmap will not set dirty bit for it, so page reclaim will
         * not write the page out. This can cause data corruption when the page
         * is swapped in later. Always setting the dirty bit for the page solves
         * the problem.
         */
        set_page_dirty(page);

        return 1;

fail:
        put_swap_page(page, entry);
        return 0;
}
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry = { .val = page_private(page) };
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(page, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_page(page, entry);
        page_ref_sub(page, thp_nr_pages(page));
}
void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end)
{
        unsigned long curr = begin;
        void *old;

        for (;;) {
                unsigned long nr_shadows = 0;
                swp_entry_t entry = swp_entry(type, curr);
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, curr);

                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, end) {
                        if (!xa_is_value(old))
                                continue;
                        xas_store(&xas, NULL);
                        nr_shadows++;
                }
                address_space->nrexceptional -= nr_shadows;
                xa_unlock_irq(&address_space->i_pages);

                /* search the next swapcache until we meet end */
                curr >>= SWAP_ADDRESS_SPACE_SHIFT;
                curr++;
                curr <<= SWAP_ADDRESS_SPACE_SHIFT;
                if (curr > end)
                        break;
        }
}
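/*
 * Example (illustrative): clear_shadow_from_swap_cache() walks one swap
 * address_space at a time; with the kernel's usual SWAP_ADDRESS_SPACE_SHIFT
 * of 14, each one covers 16384 slots.  The shift-increment-shift sequence
 * rounds curr up to the first slot of the next chunk: with curr == 20000,
 * 20000 >> 14 == 1, + 1 == 2, << 14 == 32768, so the next iteration starts
 * at slot 32768.
 */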
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr);
}
static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
                               unsigned long addr)
{
        struct page *page;
        struct swap_info_struct *si;

        si = get_swap_device(entry);
        if (!si)
                return NULL;
        page = find_get_page(swap_address_space(entry), swp_offset(entry));
        put_swap_device(si);

        INC_CACHE_INFO(find_total);
        if (page) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                INC_CACHE_INFO(find_success);
                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(PageTransCompound(page)))
                        return page;

                readahead = TestClearPageReadahead(page);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        }

        return page;
}
/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
        swp_entry_t swp;
        struct swap_info_struct *si;
        struct page *page = find_get_entry(mapping, index);

        if (!page)
                return page;
        if (!xa_is_value(page))
                return find_subpage(page, index);
        if (!shmem_mapping(mapping))
                return NULL;

        swp = radix_to_swp_entry(page);
        /* Prevent swapoff from happening to us */
        si = get_swap_device(swp);
        if (!si)
                return NULL;
        page = find_get_page(swap_address_space(swp), swp_offset(swp));
        put_swap_device(si);
        return page;
}
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct swap_info_struct *si;
        struct page *page;
        void *shadow = NULL;

        *new_page_allocated = false;

        for (;;) {
                int err;
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                si = get_swap_device(entry);
                if (!si)
                        return NULL;
                page = find_get_page(swap_address_space(entry),
                                     swp_offset(entry));
                put_swap_device(si);
                if (page)
                        return page;

                /*
                 * Just skip read ahead for unused swap slot.
                 * During swap_off when swap_slot_cache is disabled,
                 * we have to handle the race between putting
                 * swap entry in swap cache and marking swap slot
                 * as SWAP_HAS_CACHE.  That's done in later part of code or
                 * else swap_off will be aborted if we return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        return NULL;

                /*
                 * Get a new page to read into from swap.  Allocate it now,
                 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
                 * cause any racers to loop around until we add it to cache.
                 */
                page = alloc_page_vma(gfp_mask, vma, addr);
                if (!page)
                        return NULL;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (!err)
                        break;

                put_page(page);
                if (err != -EEXIST)
                        return NULL;

                /*
                 * We might race against __delete_from_swap_cache(), and
                 * stumble across a swap_map entry whose SWAP_HAS_CACHE
                 * has not yet been cleared.  Or race against another
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its page to swap cache.
                 */
                cond_resched();
        }

        /*
         * The swap entry is ours to swap in. Prepare the new page.
         */
        __SetPageLocked(page);
        __SetPageSwapBacked(page);

        /* May fail (-ENOMEM) if XArray node allocation failed. */
        if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }

        if (mem_cgroup_charge(page, NULL, gfp_mask)) {
                delete_from_swap_cache(page);
                goto fail_unlock;
        }

        if (shadow)
                workingset_refault(page, shadow);

        /* Caller will initiate read into locked page */
        lru_cache_add(page);
        *new_page_allocated = true;
        return page;

fail_unlock:
        unlock_page(page);
        put_page(page);
        return NULL;
}
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll);

        return retpage;
}
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}
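/*
 * Worked example (illustrative): with hits == 5, page_cluster == 3 (so
 * max_pages == 8) and a previous window of 4, __swapin_nr_pages() computes
 * 5 + 2 == 7, rounds it up to the next power of two (8), and 8 fits within
 * max_pages, so the next readahead window is 8 slots.  With hits == 0 and a
 * fault that is not adjacent to the previous one, pages drops to 1, but a
 * large previous window still decays gradually via prev_win / 2.
 */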
static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
                                  max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                WRITE_ONCE(prev_offset, offset);
        atomic_set(&last_readahead_pages, pages);

        return pages;
}
/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        bool do_poll = true, page_allocated;
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Test swap type to make sure the dereference is safe */
        if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
                struct inode *inode = si->swap_file->f_mapping->host;
                if (inode_read_congested(inode))
                        goto skip;
        }

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;
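        /*
         * Example (illustrative): with a window of 8 slots, mask == 7.
         * A fault at swap offset 291 (0x123) reads the aligned cluster
         * 288..295 (0x120..0x127), which includes the faulting slot
         * itself.  Slot 0 is skipped because it holds the swap header.
         */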
        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
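        /*
         * Example (illustrative): SWAP_ADDRESS_SPACE_PAGES is 16384, i.e.
         * 64MB worth of 4KB slots per address_space, so an 8GB swap device
         * is split into nr == 128 independent address_spaces, which spreads
         * i_pages lock contention across the device.
         */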
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}

void exit_swap_address_space(unsigned int type)
{
        kvfree(swapper_spaces[type]);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
                                     unsigned long faddr,
                                     unsigned long lpfn,
                                     unsigned long rpfn,
                                     unsigned long *start,
                                     unsigned long *end)
{
        *start = max3(lpfn, PFN_DOWN(vma->vm_start),
                      PFN_DOWN(faddr & PMD_MASK));
        *end = min3(rpfn, PFN_DOWN(vma->vm_end),
                    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
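/*
 * Example (illustrative): swap_ra_clamp_pfn() trims a requested window
 * [lpfn, rpfn) to the VMA and to the PMD block (2MB with 4KB pages) that
 * contains the fault address.  If the VMA ends 3 pages past faddr, a
 * forward window of 8 pages is cut down to those 3, since readahead never
 * crosses the VMA or the faulting page table page.
 */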
static void swap_ra_info(struct vm_fault *vmf,
                        struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        swp_entry_t entry;
        unsigned long faddr, pfn, fpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
        unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
        pte_t *tpte;
#endif

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
        entry = pte_to_swp_entry(*pte);
        if ((unlikely(non_swap_entry(entry)))) {
                pte_unmap(orig_pte);
                return;
        }

        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));

        if (win == 1) {
                pte_unmap(orig_pte);
                return;
        }

        /* Copy the PTEs because the page table may be unmapped */
        if (fpfn == pfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
        else if (pfn == fpfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
                                  &start, &end);
        else {
                left = (win - 1) / 2;
                swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
                                  &start, &end);
        }
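        /*
         * Illustration (not from the original source): with win == 8 and a
         * fault at fpfn, the three cases above place the window as
         *
         *      forward scan  (fpfn == pfn + 1): [fpfn, fpfn + 8)
         *      backward scan (pfn == fpfn + 1): [fpfn - 7, fpfn + 1)
         *      otherwise (random):              [fpfn - 3, fpfn + 5)
         *
         * before swap_ra_clamp_pfn() trims it to the VMA and to the PMD
         * containing the fault address.
         */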
        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;
#ifdef CONFIG_64BIT
        ra_info->ptes = pte;
#else
        tpte = ra_info->ptes;
        for (pfn = start; pfn != end; pfn++)
                *tpte++ = *pte++;
#endif
        pte_unmap(orig_pte);
}
/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte, pentry;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {
                .win = 1,
        };

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        blk_start_plug(&plug);
        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
             i++, pte++) {
                pentry = *pte;
                if (pte_none(pentry))
                        continue;
                if (pte_present(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        lru_add_drain();
skip:
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1);
}
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it reads ahead blocks using either cluster-based
 * (i.e. physical disk based) or vma-based (i.e. virtual addresses
 * around the fault address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
                        swap_vma_readahead(entry, gfp_mask, vmf) :
                        swap_cluster_readahead(entry, gfp_mask, vmf);
}
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n",
                          enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t count)
{
        if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
                enable_vma_readahead = true;
        else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
                enable_vma_readahead = false;
        else
                return -EINVAL;

        return count;
}
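/*
 * Example usage from userspace (illustrative, not part of the original
 * source): the knob is created under the mm sysfs directory, typically
 * /sys/kernel/mm/swap/vma_ra_enabled:
 *
 *      # cat /sys/kernel/mm/swap/vma_ra_enabled
 *      true
 *      # echo false > /sys/kernel/mm/swap/vma_ra_enabled
 */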
static struct kobj_attribute vma_ra_enabled_attr =
        __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
               vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static const struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};
static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif