#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
+#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
+#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
-#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
void show_swap_cache_info(void)
{
printk("%lu pages in swap cache\n", total_swapcache_pages());
- printk("Free swap = %ldkB\n",
- get_nr_swap_pages() << (PAGE_SHIFT - 10));
- printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
+ printk("Free swap = %ldkB\n", K(get_nr_swap_pages()));
+ printk("Total swap = %lukB\n", K(total_swap_pages));
}
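/*
 * K() is assumed to be the usual pages-to-KiB helper,
 * #define K(x) ((x) << (PAGE_SHIFT - 10)), so both printks keep reporting
 * kilobytes exactly as the open-coded shifts did.
 */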
void *get_shadow_from_swap_cache(swp_entry_t entry)
* add_to_swap_cache resembles filemap_add_folio on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
-int add_to_swap_cache(struct page *page, swp_entry_t entry,
+int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
gfp_t gfp, void **shadowp)
{
struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swp_offset(entry);
- XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
- unsigned long i, nr = thp_nr_pages(page);
+ XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
+ unsigned long i, nr = folio_nr_pages(folio);
void *old;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(PageSwapCache(page), page);
- VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
+ xas_set_update(&xas, workingset_update_node);
- page_ref_add(page, nr);
- SetPageSwapCache(page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
+
+ folio_ref_add(folio, nr);
+ folio_set_swapcache(folio);
+ folio->swap = entry;
do {
xas_lock_irq(&xas);
if (xas_error(&xas))
goto unlock;
for (i = 0; i < nr; i++) {
- VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
- old = xas_load(&xas);
- if (xa_is_value(old)) {
- if (shadowp)
+ VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
+ if (shadowp) {
+ old = xas_load(&xas);
+ if (xa_is_value(old))
*shadowp = old;
}
- set_page_private(page + i, entry.val + i);
- xas_store(&xas, page);
+ xas_store(&xas, folio);
xas_next(&xas);
}
address_space->nrpages += nr;
- __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
- __mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
+ __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+ __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
xas_unlock_irq(&xas);
} while (xas_nomem(&xas, gfp));
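/*
 * xas_nomem() returns true only after allocating the XArray node that a
 * failed store was missing, so the loop retries the whole insertion with
 * the lock dropped in between; any other error falls through to the
 * cleanup below.
 */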
if (!xas_error(&xas))
return 0;
- ClearPageSwapCache(page);
- page_ref_sub(page, nr);
+ folio_clear_swapcache(folio);
+ folio_ref_sub(folio, nr);
return xas_error(&xas);
}
pgoff_t idx = swp_offset(entry);
XA_STATE(xas, &address_space->i_pages, idx);
+ xas_set_update(&xas, workingset_update_node);
+
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
for (i = 0; i < nr; i++) {
void *entry = xas_store(&xas, shadow);
- VM_BUG_ON_FOLIO(entry != folio, folio);
- set_page_private(folio_page(folio, i), 0);
+ VM_BUG_ON_PAGE(entry != folio, entry);
xas_next(&xas);
}
+ folio->swap.val = 0;
folio_clear_swapcache(folio);
address_space->nrpages -= nr;
__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
/*
* Add it to the swap cache.
*/
- err = add_to_swap_cache(&folio->page, entry,
+ err = add_to_swap_cache(folio, entry,
__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
if (err)
/*
return true;
fail:
- put_swap_page(&folio->page, entry);
+ put_swap_folio(folio, entry);
return false;
}
*/
void delete_from_swap_cache(struct folio *folio)
{
- swp_entry_t entry = folio_swap_entry(folio);
+ swp_entry_t entry = folio->swap;
struct address_space *address_space = swap_address_space(entry);
xa_lock_irq(&address_space->i_pages);
__delete_from_swap_cache(folio, entry, NULL);
xa_unlock_irq(&address_space->i_pages);
- put_swap_page(&folio->page, entry);
+ put_swap_folio(folio, entry);
folio_ref_sub(folio, folio_nr_pages(folio));
}
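/*
 * The folio_ref_sub() above drops the per-subpage references that
 * add_to_swap_cache() took with folio_ref_add(), so the swap cache no
 * longer pins the folio once its entries have been replaced.
 */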
struct address_space *address_space = swap_address_space(entry);
XA_STATE(xas, &address_space->i_pages, curr);
+ xas_set_update(&xas, workingset_update_node);
+
xa_lock_irq(&address_space->i_pages);
xas_for_each(&xas, old, end) {
if (!xa_is_value(old))
}
}
-/*
- * If we are the only user, then try to free up the swap cache.
- *
- * Its ok to check for PageSwapCache without the page lock
+/*
+ * If we are the only user, then try to free up the swap cache.
+ *
+ * It's ok to check the swapcache flag without the folio lock
* here because we are going to recheck again inside
- * try_to_free_swap() _with_ the lock.
+ * folio_free_swap() _with_ the lock.
* - Marcelo
*/
-void free_swap_cache(struct page *page)
+void free_swap_cache(struct folio *folio)
{
- if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
- try_to_free_swap(page);
- unlock_page(page);
+ if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
+ folio_trylock(folio)) {
+ folio_free_swap(folio);
+ folio_unlock(folio);
}
}
-/*
+/*
* Perform a free_page(), also freeing any swap cache associated with
* this page if it is the last user of the page.
*/
void free_page_and_swap_cache(struct page *page)
{
- free_swap_cache(page);
+ struct folio *folio = page_folio(page);
+
+ free_swap_cache(folio);
if (!is_huge_zero_page(page))
- put_page(page);
+ folio_put(folio);
}
/*
* Passed an array of pages, drop them all from swapcache and then release
* them. They are removed from the LRU and freed if this is their last use.
*/
-void free_pages_and_swap_cache(struct page **pages, int nr)
+void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
- struct page **pagep = pages;
- int i;
+ struct folio_batch folios;
+ unsigned int refs[PAGEVEC_SIZE];
lru_add_drain();
- for (i = 0; i < nr; i++)
- free_swap_cache(pagep[i]);
- release_pages(pagep, nr);
+ folio_batch_init(&folios);
+ for (int i = 0; i < nr; i++) {
+ struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
+
+ free_swap_cache(folio);
+ refs[folios.nr] = 1;
+ if (unlikely(encoded_page_flags(pages[i]) &
+ ENCODED_PAGE_BIT_NR_PAGES_NEXT))
+ refs[folios.nr] = encoded_nr_pages(pages[++i]);
+
+ if (folio_batch_add(&folios, folio) == 0)
+ folios_put_refs(&folios, refs);
+ }
+ if (folios.nr)
+ folios_put_refs(&folios, refs);
}
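/*
 * In free_pages_and_swap_cache() each array slot normally stands for a
 * single reference on its folio.  When ENCODED_PAGE_BIT_NR_PAGES_NEXT is
 * set, the next slot is not a page pointer but a reference count
 * (encoded_nr_pages()), and folios_put_refs() releases that many
 * references per folio in one batch.
 */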
static inline bool swap_use_vma_readahead(void)
}
/*
- * Lookup a swap entry in the swap cache. A found page will be returned
+ * Lookup a swap entry in the swap cache. A found folio will be returned
* unlocked and with its refcount incremented - we rely on the kernel
- * lock getting page table operations atomic even if we drop the page
+ * lock to keep page table operations atomic even if we drop the folio
* lock before returning.
+ *
+ * Caller must lock the swap device or hold a reference to keep it valid.
*/
-struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
- unsigned long addr)
+struct folio *swap_cache_get_folio(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr)
{
- struct page *page;
- struct swap_info_struct *si;
-
- si = get_swap_device(entry);
- if (!si)
- return NULL;
- page = find_get_page(swap_address_space(entry), swp_offset(entry));
- put_swap_device(si);
+ struct folio *folio;
- if (page) {
+ folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
+ if (!IS_ERR(folio)) {
bool vma_ra = swap_use_vma_readahead();
bool readahead;
* At the moment, we don't support PG_readahead for anon THP
* so let's bail out rather than confusing the readahead stat.
*/
- if (unlikely(PageTransCompound(page)))
- return page;
+ if (unlikely(folio_test_large(folio)))
+ return folio;
- readahead = TestClearPageReadahead(page);
+ readahead = folio_test_clear_readahead(folio);
if (vma && vma_ra) {
unsigned long ra_val;
int win, hits;
if (!vma || !vma_ra)
atomic_inc(&swapin_readahead_hits);
}
+ } else {
+ folio = NULL;
}
- return page;
+ return folio;
}
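/*
 * filemap_get_folio() reports a missing entry as ERR_PTR(-ENOENT) rather
 * than NULL, hence the IS_ERR() check in swap_cache_get_folio(); the error
 * pointer is turned back into NULL so callers keep their simple NULL test.
 */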
/**
- * find_get_incore_page - Find and get a page from the page or swap caches.
+ * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
* @mapping: The address_space to search.
* @index: The page cache index.
*
- * This differs from find_get_page() in that it will also look for the
- * page in the swap cache.
+ * This differs from filemap_get_folio() in that it will also look for the
+ * folio in the swap cache.
*
- * Return: The found page or %NULL.
+ * Return: The found folio or %ERR_PTR().
*/
-struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
+struct folio *filemap_get_incore_folio(struct address_space *mapping,
+ pgoff_t index)
{
swp_entry_t swp;
struct swap_info_struct *si;
- struct page *page = pagecache_get_page(mapping, index,
- FGP_ENTRY | FGP_HEAD, 0);
+ struct folio *folio = filemap_get_entry(mapping, index);
- if (!page)
- return page;
- if (!xa_is_value(page))
- return find_subpage(page, index);
+ if (!folio)
+ return ERR_PTR(-ENOENT);
+ if (!xa_is_value(folio))
+ return folio;
if (!shmem_mapping(mapping))
- return NULL;
+ return ERR_PTR(-ENOENT);
- swp = radix_to_swp_entry(page);
+ swp = radix_to_swp_entry(folio);
/* There might be swapin error entries in shmem mapping. */
if (non_swap_entry(swp))
- return NULL;
+ return ERR_PTR(-ENOENT);
/* Prevent swapoff from happening to us */
si = get_swap_device(swp);
if (!si)
- return NULL;
- page = find_get_page(swap_address_space(swp), swp_offset(swp));
+ return ERR_PTR(-ENOENT);
+ index = swp_offset(swp);
+ folio = filemap_get_folio(swap_address_space(swp), index);
put_swap_device(si);
- return page;
+ return folio;
}
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_area_struct *vma, unsigned long addr,
- bool *new_page_allocated)
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
+ bool skip_if_exists)
{
struct swap_info_struct *si;
- struct page *page;
+ struct folio *folio;
void *shadow = NULL;
*new_page_allocated = false;
+ si = get_swap_device(entry);
+ if (!si)
+ return NULL;
for (;;) {
int err;
/*
* First check the swap cache. Since this is normally
- * called after lookup_swap_cache() failed, re-calling
+ * called after swap_cache_get_folio() failed, re-calling
* that would confuse statistics.
*/
- si = get_swap_device(entry);
- if (!si)
- return NULL;
- page = find_get_page(swap_address_space(entry),
- swp_offset(entry));
- put_swap_device(si);
- if (page)
- return page;
+ folio = filemap_get_folio(swap_address_space(entry),
+ swp_offset(entry));
+ if (!IS_ERR(folio))
+ goto got_folio;
/*
* Just skip read ahead for unused swap slot.
* as SWAP_HAS_CACHE. That's done in later part of code or
* else swap_off will be aborted if we return NULL.
*/
- if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
- return NULL;
+ if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
+ goto fail_put_swap;
/*
- * Get a new page to read into from swap. Allocate it now,
+ * Get a new folio to read into from swap. Allocate it now,
* before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
* cause any racers to loop around until we add it to cache.
*/
- page = alloc_page_vma(gfp_mask, vma, addr);
- if (!page)
- return NULL;
+ folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
+ mpol, ilx, numa_node_id());
+ if (!folio)
+ goto fail_put_swap;
/*
* Swap entry may have been freed since our caller observed it.
if (!err)
break;
- put_page(page);
+ folio_put(folio);
if (err != -EEXIST)
- return NULL;
+ goto fail_put_swap;
+
+ /*
+ * Protect against a recursive call to __read_swap_cache_async()
+ * on the same entry waiting forever here because SWAP_HAS_CACHE
+ * is set but the folio is not in the swap cache yet. This can
+ * happen today if mem_cgroup_swapin_charge_folio() below
+ * triggers reclaim through zswap, which may call
+ * __read_swap_cache_async() in the writeback path.
+ */
+ if (skip_if_exists)
+ goto fail_put_swap;
/*
* We might race against __delete_from_swap_cache(), and
* stumble across a swap_map entry whose SWAP_HAS_CACHE
* has not yet been cleared. Or race against another
* __read_swap_cache_async(), which has set SWAP_HAS_CACHE
- * in swap_map, but not yet added its page to swap cache.
+ * in swap_map, but not yet added its folio to swap cache.
*/
schedule_timeout_uninterruptible(1);
}
/*
- * The swap entry is ours to swap in. Prepare the new page.
+ * The swap entry is ours to swap in. Prepare the new folio.
*/
- __SetPageLocked(page);
- __SetPageSwapBacked(page);
+ __folio_set_locked(folio);
+ __folio_set_swapbacked(folio);
- if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
+ if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
goto fail_unlock;
/* May fail (-ENOMEM) if XArray node allocation failed. */
- if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+ if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
goto fail_unlock;
mem_cgroup_swapin_uncharge_swap(entry);
if (shadow)
- workingset_refault(page_folio(page), shadow);
+ workingset_refault(folio, shadow);
- /* Caller will initiate read into locked page */
- lru_cache_add(page);
+ /* Caller will initiate read into locked folio */
+ folio_add_lru(folio);
*new_page_allocated = true;
- return page;
+got_folio:
+ put_swap_device(si);
+ return folio;
fail_unlock:
- put_swap_page(page, entry);
- unlock_page(page);
- put_page(page);
+ put_swap_folio(folio, entry);
+ folio_unlock(folio);
+ folio_put(folio);
+fail_put_swap:
+ put_swap_device(si);
return NULL;
}
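/*
 * In __read_swap_cache_async() the get_swap_device()/put_swap_device()
 * pair now brackets the whole function instead of just the cache lookup,
 * so the device cannot be swapped off while the folio is allocated,
 * charged and inserted; every exit goes through got_folio or
 * fail_put_swap to drop that reference.
 */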
* and reading the disk if it is not already cached.
* A failure return means that either the page allocation failed or that
* the swap entry is no longer in use.
+ *
+ * get/put_swap_device() aren't needed to call this function, because
+ * __read_swap_cache_async() calls them and swap_read_folio() holds the
+ * swap cache folio lock.
*/
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_area_struct *vma,
- unsigned long addr, bool do_poll,
- struct swap_iocb **plug)
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr,
+ struct swap_iocb **plug)
{
- bool page_was_allocated;
- struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
- vma, addr, &page_was_allocated);
-
- if (page_was_allocated)
- swap_readpage(retpage, do_poll, plug);
-
- return retpage;
+ bool page_allocated;
+ struct mempolicy *mpol;
+ pgoff_t ilx;
+ struct folio *folio;
+
+ mpol = get_vma_policy(vma, addr, 0, &ilx);
+ folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ &page_allocated, false);
+ mpol_cond_put(mpol);
+
+ if (page_allocated)
+ swap_read_folio(folio, false, plug);
+ return folio;
}
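/*
 * read_swap_cache_async() uses vma/addr only to look up the NUMA policy:
 * get_vma_policy() yields the mempolicy and interleave index that
 * __read_swap_cache_async() now takes directly, and mpol_cond_put() drops
 * whatever reference the lookup may have taken on a shared policy.
 */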
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
* swap_cluster_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
* @gfp_mask: memory allocation flags
- * @vmf: fault information
+ * @mpol: NUMA memory allocation policy to be applied
+ * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
*
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
*
* Primitive swap readahead code. We simply read an aligned block of
* (1 << page_cluster) entries in the swap area. This method is chosen
* because it doesn't cost us any seek time. We also make sure to queue
* the 'original' request together with the readahead ones...
*
- * This has been extended to use the NUMA policies from the mm triggering
- * the readahead.
- *
- * Caller must hold read mmap_lock if vmf->vma is not NULL.
+ * Note: it is intentional that the same NUMA policy and interleave index
+ * are used for every page of the readahead: neighbouring pages on swap
+ * are fairly likely to have been swapped out from the same node.
*/
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_fault *vmf)
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
+ struct mempolicy *mpol, pgoff_t ilx)
{
- struct page *page;
+ struct folio *folio;
unsigned long entry_offset = swp_offset(entry);
unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
struct swap_info_struct *si = swp_swap_info(entry);
struct blk_plug plug;
struct swap_iocb *splug = NULL;
- bool do_poll = true, page_allocated;
- struct vm_area_struct *vma = vmf->vma;
- unsigned long addr = vmf->address;
+ bool page_allocated;
mask = swapin_nr_pages(offset) - 1;
if (!mask)
goto skip;
- do_poll = false;
/* Read a page_cluster sized and aligned cluster around offset. */
start_offset = offset & ~mask;
end_offset = offset | mask;
blk_start_plug(&plug);
for (offset = start_offset; offset <= end_offset ; offset++) {
/* Ok, do the async read-ahead now */
- page = __read_swap_cache_async(
- swp_entry(swp_type(entry), offset),
- gfp_mask, vma, addr, &page_allocated);
- if (!page)
+ folio = __read_swap_cache_async(
+ swp_entry(swp_type(entry), offset),
+ gfp_mask, mpol, ilx, &page_allocated, false);
+ if (!folio)
continue;
if (page_allocated) {
- swap_readpage(page, false, &splug);
+ swap_read_folio(folio, false, &splug);
if (offset != entry_offset) {
- SetPageReadahead(page);
+ folio_set_readahead(folio);
count_vm_event(SWAP_RA);
}
}
- put_page(page);
+ folio_put(folio);
}
blk_finish_plug(&plug);
swap_read_unplug(splug);
-
lru_add_drain(); /* Push any new pages onto the LRU now */
skip:
/* The page was likely read above, so no need for plugging here */
- return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
+ folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ &page_allocated, false);
+ if (unlikely(page_allocated)) {
+ zswap_folio_swapin(folio);
+ swap_read_folio(folio, false, NULL);
+ }
+ return folio;
}
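/*
 * swap_cluster_readahead() now reads the target entry itself rather than
 * via read_swap_cache_async(), so that a freshly allocated folio can be
 * reported to zswap first: zswap_folio_swapin() is understood to feed
 * zswap's writeback heuristics before swap_read_folio() issues the I/O.
 */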
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
swapper_spaces[type] = NULL;
}
-static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
- unsigned long faddr,
- unsigned long lpfn,
- unsigned long rpfn,
- unsigned long *start,
- unsigned long *end)
-{
- *start = max3(lpfn, PFN_DOWN(vma->vm_start),
- PFN_DOWN(faddr & PMD_MASK));
- *end = min3(rpfn, PFN_DOWN(vma->vm_end),
- PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
-}
+#define SWAP_RA_ORDER_CEILING 5
+
+struct vma_swap_readahead {
+ unsigned short win;
+ unsigned short offset;
+ unsigned short nr_pte;
+};
static void swap_ra_info(struct vm_fault *vmf,
- struct vma_swap_readahead *ra_info)
+ struct vma_swap_readahead *ra_info)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long ra_val;
- unsigned long faddr, pfn, fpfn;
+ unsigned long faddr, pfn, fpfn, lpfn, rpfn;
unsigned long start, end;
- pte_t *pte, *orig_pte;
- unsigned int max_win, hits, prev_win, win, left;
-#ifndef CONFIG_64BIT
- pte_t *tpte;
-#endif
+ unsigned int max_win, hits, prev_win, win;
max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
SWAP_RA_ORDER_CEILING);
}
faddr = vmf->address;
- orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
-
fpfn = PFN_DOWN(faddr);
ra_val = GET_SWAP_RA_VAL(vma);
pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
max_win, prev_win);
atomic_long_set(&vma->swap_readahead_info,
SWAP_RA_VAL(faddr, win, 0));
-
- if (win == 1) {
- pte_unmap(orig_pte);
+ if (win == 1)
return;
- }
- /* Copy the PTEs because the page table may be unmapped */
- if (fpfn == pfn + 1)
- swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
- else if (pfn == fpfn + 1)
- swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
- &start, &end);
- else {
- left = (win - 1) / 2;
- swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
- &start, &end);
+ if (fpfn == pfn + 1) {
+ lpfn = fpfn;
+ rpfn = fpfn + win;
+ } else if (pfn == fpfn + 1) {
+ lpfn = fpfn - win + 1;
+ rpfn = fpfn + 1;
+ } else {
+ unsigned int left = (win - 1) / 2;
+
+ lpfn = fpfn - left;
+ rpfn = fpfn + win - left;
}
+ start = max3(lpfn, PFN_DOWN(vma->vm_start),
+ PFN_DOWN(faddr & PMD_MASK));
+ end = min3(rpfn, PFN_DOWN(vma->vm_end),
+ PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
+
ra_info->nr_pte = end - start;
ra_info->offset = fpfn - start;
- pte -= ra_info->offset;
-#ifdef CONFIG_64BIT
- ra_info->ptes = pte;
-#else
- tpte = ra_info->ptes;
- for (pfn = start; pfn != end; pfn++)
- *tpte++ = *pte++;
-#endif
- pte_unmap(orig_pte);
}
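/*
 * The readahead window is clamped inline now: max3()/min3() keep it inside
 * the VMA and inside the PMD that holds the faulting address.  The old
 * PTE-copying into ra_info->ptes is gone because swap_vma_readahead()
 * below maps and reads the page table entries itself, one at a time.
 */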
/**
* swap_vma_readahead - swap in pages in hope we need them soon
- * @fentry: swap entry of this memory
+ * @targ_entry: swap entry of the targeted memory
* @gfp_mask: memory allocation flags
+ * @mpol: NUMA memory allocation policy to be applied
+ * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
* @vmf: fault information
*
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
*
* Primitive swap readahead code. We simply read in a few pages whose
* virtual addresses are around the fault address in the same vma.
* Caller must hold read mmap_lock if vmf->vma is not NULL.
*
*/
-static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
- struct vm_fault *vmf)
+static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
+ struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
struct blk_plug plug;
struct swap_iocb *splug = NULL;
- struct vm_area_struct *vma = vmf->vma;
- struct page *page;
- pte_t *pte, pentry;
+ struct folio *folio;
+ pte_t *pte = NULL, pentry;
+ unsigned long addr;
swp_entry_t entry;
+ pgoff_t ilx;
unsigned int i;
bool page_allocated;
struct vma_swap_readahead ra_info = {
if (ra_info.win == 1)
goto skip;
+ addr = vmf->address - (ra_info.offset * PAGE_SIZE);
+ ilx = targ_ilx - ra_info.offset;
+
blk_start_plug(&plug);
- for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
- i++, pte++) {
- pentry = *pte;
+ for (i = 0; i < ra_info.nr_pte; i++, ilx++, addr += PAGE_SIZE) {
+ if (!pte++) {
+ pte = pte_offset_map(vmf->pmd, addr);
+ if (!pte)
+ break;
+ }
+ pentry = ptep_get_lockless(pte);
if (!is_swap_pte(pentry))
continue;
entry = pte_to_swp_entry(pentry);
if (unlikely(non_swap_entry(entry)))
continue;
- page = __read_swap_cache_async(entry, gfp_mask, vma,
- vmf->address, &page_allocated);
- if (!page)
+ pte_unmap(pte);
+ pte = NULL;
+ folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ &page_allocated, false);
+ if (!folio)
continue;
if (page_allocated) {
- swap_readpage(page, false, &splug);
+ swap_read_folio(folio, false, &splug);
if (i != ra_info.offset) {
- SetPageReadahead(page);
+ folio_set_readahead(folio);
count_vm_event(SWAP_RA);
}
}
- put_page(page);
+ folio_put(folio);
}
+ if (pte)
+ pte_unmap(pte);
blk_finish_plug(&plug);
swap_read_unplug(splug);
lru_add_drain();
skip:
- /* The page was likely read above, so no need for plugging here */
- return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
- ra_info.win == 1, NULL);
+ /* The folio was likely read above, so no need for plugging here */
+ folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
+ &page_allocated, false);
+ if (unlikely(page_allocated)) {
+ zswap_folio_swapin(folio);
+ swap_read_folio(folio, false, NULL);
+ }
+ return folio;
}
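/*
 * The "if (!pte++)" idiom in swap_vma_readahead() maps the page table
 * lazily: pte is NULL whenever the previous iteration had to drop the
 * mapping before calling __read_swap_cache_async() (which can sleep), and
 * is simply advanced otherwise; any leftover mapping is dropped after the
 * loop.
 */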
/**
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
- return swap_use_vma_readahead() ?
- swap_vma_readahead(entry, gfp_mask, vmf) :
- swap_cluster_readahead(entry, gfp_mask, vmf);
+ struct mempolicy *mpol;
+ pgoff_t ilx;
+ struct folio *folio;
+
+ mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
+ folio = swap_use_vma_readahead() ?
+ swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
+ swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
+ mpol_cond_put(mpol);
+
+ if (!folio)
+ return NULL;
+ return folio_file_page(folio, swp_offset(entry));
}
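/*
 * The swap-cache lookup in swapin_readahead() can return a large folio
 * that is still in the swap cache (e.g. a THP written out as one unit),
 * so folio_file_page() picks out the exact subpage for @entry; callers
 * still work in terms of struct page.
 */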
#ifdef CONFIG_SYSFS