// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include "swap.h"
static void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}
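
/*
 * Note on the handshake above: for synchronous reads, swap_readpage() stores
 * a task reference in bio->bi_private before submission and then polls that
 * field under TASK_UNINTERRUPTIBLE. The WRITE_ONCE() here pairs with the
 * READ_ONCE() in that polling loop (see swap_readpage() below), so clearing
 * bi_private is what lets the waiter exit the loop; blk_wake_io_task() merely
 * wakes it so it notices.
 */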
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}
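
/*
 * A usage sketch rather than a guarantee: swapon's setup_swap_extents() in
 * mm/swapfile.c is expected to fall back to generic_swapfile_activate() when
 * the swapfile's address_space provides no ->swap_activate method. A
 * filesystem whose on-disk layout is safely described by bmap() could also
 * delegate explicitly; "myfs" below is a hypothetical example, not a real
 * in-tree user:
 *
 *	static int myfs_swap_activate(struct swap_info_struct *sis,
 *				      struct file *file, sector_t *span)
 *	{
 *		return generic_swapfile_activate(sis, file, span);
 *	}
 */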
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(&folio->page);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}
	if (frontswap_store(&folio->page) == 0) {
		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);
		return 0;
	}
	return __swap_writepage(&folio->page, wbc);
}
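
/*
 * For context (an assumption about the surrounding tree, not enforced here):
 * swap_writepage() is normally reached as the ->writepage method of the swap
 * address_space, e.g. in mm/swap_state.c:
 *
 *	static const struct address_space_operations swap_aops = {
 *		.writepage	= swap_writepage,
 *		...
 *	};
 */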
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}
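
/*
 * A note on the cmpxchg() idiom above: sio_pool_init() may race with itself
 * on concurrent swapons, so the pool is published with a single atomic
 * compare-and-exchange and the loser destroys its duplicate. The assumed
 * caller is setup_swap_extents() in mm/swapfile.c, roughly:
 *
 *	if ((sis->flags & SWP_FS_OPS) && sio_pool_init() != 0) {
 *		destroy_swap_extents(sis);
 *		return -ENOMEM;
 *	}
 */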
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the pages dirty, avoid
		 * folio_rotate_reclaimable(), and rate-limit the
		 * messages, but do not flag PageError as in the
		 * normal direct-to-bio case, since the failure
		 * could be temporary.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, page_file_offset(page));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	} else {
		for (p = 0; p < sio->pages; p++)
			count_swpout_vm_event(sio->bvec[p].bv_page);
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}
static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = page_swap_info(page);
	struct file *swap_file = sis->swap_file;
	loff_t pos = page_file_offset(page);

	set_page_writeback(page);
	unlock_page(page);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;

	return 0;
}
int __swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		return swap_writepage_fs(page, wbc);

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}
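
/*
 * A write-plugging sketch (an assumed caller shape, modelled on reclaim in
 * mm/vmscan.c rather than defined here): the caller threads a swap_iocb
 * pointer through writeback_control so pages bound for adjacent swapfile
 * offsets coalesce into one swap_rw() call, then flushes the remainder:
 *
 *	struct swap_iocb *plug = NULL;
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.swap_plug	= &plug,
 *	};
 *
 *	// ... submit pages via swap_writepage(page, &wbc) in a loop ...
 *
 *	if (plug)
 *		swap_write_unplug(plug);
 */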
static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageUptodate(page);
			unlock_page(page);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			ClearPageUptodate(page);
			unlock_page(page);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}
static void swap_readpage_fs(struct page *page,
			     struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	struct swap_iocb *sio = NULL;
	loff_t pos = page_file_offset(page);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}
int swap_readpage(struct page *page, bool synchronous,
		  struct swap_iocb **plug)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	bool workingset = PageWorkingset(page);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_readpage_fs(page, plug);
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	if (synchronous) {
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
	return ret;
}
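
/*
 * A read-side plugging sketch (an assumed caller shape, modelled on the swap
 * readahead code rather than defined in this file): batch several reads from
 * an SWP_FS_OPS swapfile through one swap_iocb, then flush:
 *
 *	struct swap_iocb *splug = NULL;
 *
 *	// for each readahead page:
 *	swap_readpage(page, false, &splug);
 *	// ...
 *	swap_read_unplug(splug);
 */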
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}
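
/*
 * Presumably (it lives in mm/swap.h, not in this file) swap_read_unplug() is
 * the NULL-tolerant wrapper that callers actually use:
 *
 *	static inline void swap_read_unplug(struct swap_iocb *plug)
 *	{
 *		if (unlikely(plug))
 *			__swap_read_unplug(plug);
 *	}
 */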