// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

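/*
 * rmap_walk() callback: clear the young bits of every PTE or PMD that maps
 * @folio in @vma, so that the next access to the folio can be detected.
 */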
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
	}
	return true;
}

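/*
 * Age the folio containing the page at physical address @paddr.  Unmapped
 * folios only get the page idle bit set.  Non-anonymous and KSM folios need
 * the folio lock for a stable rmap walk; it is taken with a trylock and the
 * folio is skipped on contention, so the sampling thread never blocks here.
 */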
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

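/*
 * Pick a random sampling address within the region and age the page there,
 * so that the next check can tell whether it was accessed in between.
 */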
static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

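/* Prepare the access-bit based sampling for every region of every target */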
static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

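/* Result of an access check, shared with the __damon_pa_young() callback */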
struct damon_pa_access_chk_result {
	unsigned long page_sz;
	bool accessed;
};

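/*
 * rmap_walk() callback: check the young bits of the PTEs or PMDs that map
 * @folio in @vma.  Stops the walk as soon as one access is found, and
 * records the mapping size so callers can reuse the result for nearby
 * addresses in the same (possibly huge) page.
 */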
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct damon_pa_access_chk_result *result = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	result->accessed = false;
	result->page_sz = PAGE_SIZE;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			result->accessed = pte_young(*pvmw.pte) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			result->accessed = pmd_young(*pvmw.pmd) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
			result->page_sz = HPAGE_PMD_SIZE;
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (result->accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return !result->accessed;
}

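/*
 * Check whether the page at physical address @paddr was accessed since the
 * matching damon_pa_mkold() call, setting @page_sz to the size of the
 * mapping that was checked.  Unmapped folios fall back to the page idle bit.
 */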
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct damon_pa_access_chk_result result = {
		.page_sz = PAGE_SIZE,
		.accessed = false,
	};
	struct rmap_walk_control rwc = {
		.arg = &result,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return false;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		result.accessed = !folio_test_idle(folio);
		folio_put(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio)) {
		folio_put(folio);
		return false;
	}

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
	folio_put(folio);

out:
	*page_sz = result.page_sz;
	return result.accessed;
}

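/*
 * Check the access to the sampling address of @r and update r->nr_accesses.
 * The last result is cached in static variables so that regions whose
 * sampling addresses fall into the same checked page reuse it instead of
 * walking the rmap again.
 */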
static void __damon_pa_check_access(struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

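/* Check the accesses of all regions and return the highest nr_accesses */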
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

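/*
 * DAMOS_PAGEOUT: isolate the evictable LRU pages of the region and reclaim
 * them.  The referenced and young markers are cleared first so that the
 * pages look cold to reclaim.  Returns the number of bytes paged out.
 */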
static unsigned long damon_pa_pageout(struct damon_region *r)
{
	unsigned long addr, applied;
	LIST_HEAD(page_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (isolate_lru_page(page)) {
			put_page(page);
			continue;
		}
		if (PageUnevictable(page)) {
			putback_lru_page(page);
		} else {
			list_add(&page->lru, &page_list);
			put_page(page);
		}
	}
	applied = reclaim_pages(&page_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

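/*
 * Common body of DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO: either mark each page
 * of the region as accessed (prioritize it on the LRU lists) or deactivate
 * it (deprioritize it).  Returns the number of bytes the action was applied
 * to.
 */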
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, bool mark_accessed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;
		if (mark_accessed)
			mark_page_accessed(page);
		else
			deactivate_page(page);
		put_page(page);
		applied++;
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r)
{
	return damon_pa_mark_accessed_or_deactivate(r, true);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
{
	return damon_pa_mark_accessed_or_deactivate(r, false);
}

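/* Apply the DAMOS action of @scheme to @r and return the applied bytes */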
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r);
	case DAMOS_STAT:
		break;
	default:
		/* DAMOS actions that are not yet supported by 'paddr' */
		break;
	}
	return 0;
}

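/*
 * Return the quota-based priority score of @r for @scheme:
 * damon_cold_score() rates colder regions higher for reclaim-like actions,
 * while damon_hot_score() rates hotter regions higher for LRU prioritization.
 */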
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

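/* Register the 'paddr' monitoring operations set with the DAMON core */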
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);