// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	/* Invoke ->pte_entry() on every pte in [addr, end). */
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
	}
	return err;
}
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	int err;

	if (walk->no_vma) {
		/* No vma (e.g. kernel page tables): don't take the pte lock. */
		pte = pte_offset_map(pmd, addr);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap(pte);
	} else {
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap_unlock(pte, ptl);
	}

	return err;
}
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 * (see the illustrative handler after this function).
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pte_entry))
			continue;

		if (walk->vma) {
			split_huge_pmd(walk->vma, pmd, addr);
			if (pmd_trans_unstable(pmd))
				goto again;
		}

		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
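/*
 * Illustrative sketch, not part of the original file: a ->pmd_entry()
 * handler must cope with transparent huge pmds on its own.  The made-up
 * handler below deals with a huge pmd as a whole and tells the walker
 * not to split it or descend into it.
 */
#if 0	/* example only, not built */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);

	if (ptl) {
		/* ... handle the whole huge mapping under the pmd lock ... */
		spin_unlock(ptl);
		walk->action = ACTION_CONTINUE;	/* skip split and pte walk */
	}
	return 0;
}
#endif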
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	pud = pud_offset(p4d, addr);
	do {
again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		if (ops->pud_entry)
			err = ops->pud_entry(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pmd_entry || ops->pte_entry))
			continue;

		if (walk->vma)
			split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}
static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (ops->p4d_entry) {
			err = ops->p4d_entry(p4d, addr, next, walk);
			if (err)
				break;
		}
		if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}
static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (ops->pgd_entry) {
			err = ops->pgd_entry(pgd, addr, next, walk);
			if (err)
				break;
		}
		if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
		    ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);

	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, where we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling hole range and they don't
	 * want to just ignore any single address range. Such users certainly
	 * define their ->pte_hole() callbacks, so let's delegate them to handle
	 * such a range.
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (ops->pte_hole)
			err = ops->pte_hole(start, end, walk);
		return err ? err : 1;
	}
	return 0;
}
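/*
 * Illustrative sketch, not part of the original file: a ->test_walk()
 * callback that limits the walk to anonymous vmas.  The name below is
 * made up; only the return convention (1 = skip the vma, 0 = walk it,
 * <0 = abort) comes from the function above.
 */
#if 0	/* example only, not built */
static int example_test_walk(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	return vma_is_anonymous(walk->vma) ? 0 : 1;
}
#endif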
static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (vma && ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);
		if (err)
			return err;
	}

	if (vma && is_vm_hugetlb_page(vma)) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (vma && ops->post_vma)
		ops->post_vma(walk);

	return err;
}
/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as below:
 *
 *  - 0  : succeeded to handle the current entry, and if you don't reach the
 *         end address yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with caller specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_sem,
 *   because these functions traverse the vma list and/or access the vma's data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
	};

	if (start >= end || !walk.mm)
		return -EINVAL;

	lockdep_assert_held(&walk.mm->mmap_sem);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
		}
		if (walk.vma || walk.ops->pte_hole)
			err = __walk_page_range(start, next, &walk);
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
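/*
 * Illustrative usage sketch, not part of the original file: a minimal
 * walker that counts present ptes in a range, using only a ->pte_entry()
 * callback and passing the counter through @private.  All names below
 * are made up for illustration.
 */
#if 0	/* example only, not built */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops count_pte_ops = {
	.pte_entry	= count_pte_entry,
};

/*
 * Caller (holding mm->mmap_sem for read):
 *
 *	unsigned long count = 0;
 *	int err = walk_page_range(mm, start, end, &count_pte_ops, &count);
 */
#endif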
/*
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful for
 * walking the kernel page tables or page tables for firmware.
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
		.no_vma		= true
	};

	if (start >= end || !walk.mm)
		return -EINVAL;

	lockdep_assert_held(&walk.mm->mmap_sem);

	return __walk_page_range(start, end, &walk);
}
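/*
 * Illustrative usage sketch, not part of the original file: walking a
 * stretch of the kernel page tables through init_mm.  The ops instance
 * and the address range below are hypothetical; only
 * walk_page_range_novma() and init_mm are real symbols.
 *
 *	down_read(&init_mm.mmap_sem);
 *	err = walk_page_range_novma(&init_mm, VMALLOC_START, VMALLOC_END,
 *				    &dump_ops, NULL);
 *	up_read(&init_mm.mmap_sem);
 */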
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};
	int err;

	if (!walk.mm)
		return -EINVAL;

	lockdep_assert_held(&walk.mm->mmap_sem);

	err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}
/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping: Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr: Number of incremental page offsets to cover
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * handled as well.
 *
 * Also see walk_page_range() for additional information.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_sem is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_sem is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.private	= private,
	};
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;
	int err = 0;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma);
		cba = first_index;
		cba = max(cba, vba);
		cea = first_index + nr;
		cea = min(cea, vea);

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.vma = vma;
		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
		if (err > 0) {
			err = 0;
			break;
		} else if (err < 0)
			break;

		err = __walk_page_range(start_addr, end_addr, &walk);
		if (err)
			break;
	}

	return err;
}
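/*
 * Illustrative usage sketch, not part of the original file: walking the
 * user mappings of the first 16 pages of an address_space.  "my_ops" is
 * a made-up mm_walk_ops instance.
 *
 *	i_mmap_lock_read(mapping);
 *	err = walk_page_mapping(mapping, 0, 16, &my_ops, NULL);
 *	i_mmap_unlock_read(mapping);
 */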