/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success.
 */
#define MIGRATEPAGE_SUCCESS		0

/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 * The VM calls this function to prepare the page to be moved. The page
 * is locked and the driver should not unlock it. The driver should
 * return ``true`` if the page is movable and ``false`` if it is not
 * currently movable. After this function returns, the VM uses the
 * page->lru field, so the driver must preserve any information which
 * is usually stored here.
 *
 * @migrate_page:
 * After isolation, the VM calls this function with the isolated
 * @src page. The driver should copy the contents of the
 * @src page to the @dst page and set up the fields of @dst page.
 * Both pages are locked.
 * If page migration is successful, the driver should call
 * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
 * If the driver cannot migrate the page at the moment, it can return
 * -EAGAIN. The VM interprets this as a temporary migration failure and
 * will retry it later. Any other error value is a permanent migration
 * failure and migration will not be retried.
 * The driver shouldn't touch the @src->lru field while in the
 * migrate_page() function. It may write to @dst->lru.
 *
 * @putback_page:
 * If migration fails on the isolated page, the VM informs the driver
 * that the page is no longer a candidate for migration by calling
 * this function. The driver should put the isolated page back into
 * its own data structure.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode mode);
	void (*putback_page)(struct page *);
};

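/*
 * Illustrative sketch only (not part of this API): a driver that owns
 * movable pages might wire these ops up roughly as below. All demo_*
 * names are hypothetical placeholders for the driver's own bookkeeping;
 * real users include zsmalloc and the balloon drivers. demo_isolate_page()
 * detaches the page from the driver's lists (page->lru then belongs to
 * the VM), demo_migrate_page() copies @src to @dst and moves the driver's
 * bookkeeping over, and demo_putback_page() reattaches a page whose
 * migration failed.
 *
 *	static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
 *	{
 *		return demo_try_detach(page);
 *	}
 *
 *	static int demo_migrate_page(struct page *dst, struct page *src,
 *			enum migrate_mode mode)
 *	{
 *		memcpy(page_address(dst), page_address(src), PAGE_SIZE);
 *		demo_replace_page(src, dst);
 *		__ClearPageMovable(src);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 *
 *	static void demo_putback_page(struct page *page)
 *	{
 *		demo_reattach(page);
 *	}
 *
 *	static const struct movable_operations demo_mops = {
 *		.isolate_page	= demo_isolate_page,
 *		.migrate_page	= demo_migrate_page,
 *		.putback_page	= demo_putback_page,
 *	};
 *
 * Pages are tagged with __SetPageMovable(page, &demo_mops) while locked.
 */
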
/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);

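/*
 * Illustrative sketch only: a typical caller isolates candidate pages onto
 * a private list and hands the list to migrate_pages(). Here "mtc" is a
 * hypothetical struct migration_target_control (the type is only forward
 * declared in this header) passed through @private to
 * alloc_migration_target():
 *
 *	LIST_HEAD(pagelist);
 *	unsigned int nr_succeeded;
 *	int ret;
 *
 *	... isolate candidate pages onto &pagelist ...
 *
 *	ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG,
 *			&nr_succeeded);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 */
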
int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
		spinlock_t *ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);

#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
void __ClearPageMovable(struct page *page);
#else
static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
		const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

static inline bool folio_test_movable(struct folio *folio)
{
	return PageMovable(&folio->page);
}

static inline
const struct movable_operations *page_movable_ops(struct page *page)
{
	VM_BUG_ON(!__PageMovable(page));

	return (const struct movable_operations *)
		((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
}

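/*
 * For context, a sketch of the convention behind the arithmetic above
 * (the encoding side lives in mm/compaction.c, not here):
 * __SetPageMovable() stores the ops pointer in page->mapping with the
 * PAGE_MAPPING_MOVABLE bit set, roughly:
 *
 *	page->mapping = (void *)((unsigned long)ops | PAGE_MAPPING_MOVABLE);
 *
 * so page_movable_ops() only has to subtract that bit back out.
 */
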
#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store both the physical address and the flags. So far we
 * have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}

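/*
 * Illustrative round trip (sketch only): device drivers store a pfn plus
 * flag bits in one entry of the migrate_vma src/dst arrays and later
 * decode it, e.g.:
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) |
 *			     MIGRATE_PFN_MIGRATE;
 *	...
 *	struct page *page = migrate_pfn_to_page(mpfn);
 *
 * migrate_pfn_to_page() returns NULL if MIGRATE_PFN_VALID is not set.
 */
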
enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and the dst array must not be modified
	 * after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;

	/*
	 * Set to vmf->page if this is being called to migrate a page as part of
	 * a migrate_to_ram() callback.
	 */
	struct page		*fault_page;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages);
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages);

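/*
 * Illustrative call sequence (sketch only; error handling and the actual
 * device-side copy are elided, and vma/start/end/src_pfns/dst_pfns/
 * drv_owner are assumed caller-provided): a device driver drives a
 * migration in three phases around its own copy step.
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= end,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.pgmap_owner	= drv_owner,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	... allocate device pages, fill args.dst[], copy the data ...
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 */
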
#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */