/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
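
/*
 * Worked example (informational sketch, not used by the build): with a
 * 32-bit pgoff_t and the 5/27 type/offset split described above, a swap
 * entry's offset is bounded by 2^27 pages:
 *
 *	#define TYPE_BITS	5
 *	#define OFFSET_BITS	(32 - TYPE_BITS)	// 27
 *	// max offset: (1UL << OFFSET_BITS) - 1 == 134217727 pages,
 *	// i.e. with 4KiB pages each swap area can address up to 512GiB
 */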

/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that otherwise
 * should be a none pte.  As its name "PTE" hints, it should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
			    SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by CPU, so that we can
 * migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif
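
/*
 * Illustrative sketch (informational; the helpers live in linux/swapops.h
 * and the exact constructors vary by kernel version): migrating a page
 * into device-private memory replaces its PTE with a device entry, e.g.
 *
 *	swp_entry_t entry = swp_entry(SWP_DEVICE_WRITE, page_to_pfn(page));
 *	// a later CPU touch faults and is routed to the owning device's
 *	// migrate_to_ram() callback instead of the swapin path
 */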

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (parts of) anonymous pages that are mapped writable are exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif
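
/*
 * Illustrative sketch (informational; see linux/swapops.h): during page
 * migration the PTEs are replaced with migration entries so concurrent
 * faults wait for the move instead of touching a page in flight:
 *
 *	swp_entry_t entry = make_readable_migration_entry(page_to_pfn(page));
 *	// install via swp_entry_to_pte(entry); faulting threads then block
 *	// in migration_entry_wait() until the new mapping is published
 */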

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)
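
/*
 * Worked example (informational): with CONFIG_DEVICE_PRIVATE,
 * CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE all enabled,
 *
 *	MAX_SWAPFILES = (1 << 5) - 4 - 3 - 1 - 1 = 23
 *
 * so types 0..22 name real swap areas, while types 23..31 carry the
 * hwpoison, migration, device and PTE-marker entries defined above.
 */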

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
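
/*
 * Illustrative sketch (informational, userspace-flavoured): a tool probing
 * a device for a swap signature reads the last 10 bytes of the first page,
 * which is exactly where the union above places the magic:
 *
 *	char buf[10];
 *
 *	if (pread(fd, buf, 10, page_size - 10) == 10 &&
 *	    memcmp(buf, "SWAPSPACE2", 10) == 0)
 *		;	// new-style header: info.version etc. are valid
 */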

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	/* pages reclaimed outside of LRU-based reclaim */
	unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed += pages;
}
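
/*
 * Illustrative caller (informational; my_cache_trim() is hypothetical): a
 * shrinker that frees pages directly, bypassing the LRU, reports them so
 * reclaim accounting stays accurate:
 *
 *	static unsigned long my_shrinker_scan(struct shrinker *s,
 *					      struct shrink_control *sc)
 *	{
 *		unsigned long freed = my_cache_trim(sc->nr_to_scan);
 *
 *		mm_account_reclaimed_pages(freed);
 *		return freed;
 *	}
 */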

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A rbtree of swap extents maps the entire swapfile (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
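
/*
 * Worked example (informational): with 4KiB pages, magic.magic starts at
 * offset 4096 - 10 = 4086 and info.badpages at offset
 * 1024 + 3*4 + 16 + 16 + 117*4 = 1536, so
 * MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637 bad-page slots.
 */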

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
	/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and the swap_info_struct->swap_map
				 * elements that correspond to the
				 * swap cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
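
/*
 * Illustrative sketch (informational; the real accessors live in
 * mm/swapfile.c): the 24-bit data field is overloaded according to flags:
 *
 *	spin_lock(&ci->lock);
 *	if (ci->flags & CLUSTER_FLAG_FREE)
 *		next = ci->data;	// index of the next free cluster
 *	else
 *		inuse = ci->data;	// pages allocated in this cluster
 *	spin_unlock(&ci->lock);
 */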

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
	struct completion comp;		/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags needs
					 * holding this lock and swap_lock. If
					 * both locks must be held, take
					 * swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					   * entries in swap_avail_heads, one
					   * entry per node.
					   * Must be last as the number of the
					   * array is nr_node_ids, which is not
					   * a fixed value so have to allocate
					   * dynamically.
					   * And it has to be an array so that
					   * plist_for_each_* can work.
					   */
};

static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
	swp_entry_t entry = { .val = page_private(&folio->page) };

	return entry;
}

static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
{
	folio->private = (void *)entry.val;
}
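
/*
 * Illustrative round trip (informational): the two helpers above are
 * inverses, stashing the entry in the folio's private field while the
 * folio sits in the swap cache:
 *
 *	folio_set_swap_entry(folio, entry);
 *	VM_WARN_ON(folio_swap_entry(folio).val != entry.val);
 */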

/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
		xas_set_update(xas, workingset_update_node);		\
		xas_set_lru(xas, &shadow_nodes);			\
	}								\
} while (0)

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file,
		   unsigned int nr_io, unsigned int nr_rotated);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

extern void free_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
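
/*
 * Illustrative pairing (informational): get_swap_device() pins the swap
 * device behind an entry so it cannot be swapped off underneath the caller:
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (si) {
 *		// the entry's device stays valid in this window
 *		put_swap_device(si);
 *	}
 */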

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline void free_swap_cache(struct page *page)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;

	entry.val = 0;
	return entry;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return READ_ONCE(vm_swappiness);

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return READ_ONCE(vm_swappiness);

	return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return READ_ONCE(vm_swappiness);
}
#endif

#ifdef CONFIG_ZSWAP
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return;
	__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */