// SPDX-License-Identifier: GPL-2.0
/*
 * Generic pgtable methods declared in linux/pgtable.h
 *
 * Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mm_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

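/*
 * For context, the p?d_none_or_clear_bad() callers live in
 * include/linux/pgtable.h and follow roughly this shape (a sketch,
 * not a verbatim copy of that header):
 *
 *	static inline int pgd_none_or_clear_bad(pgd_t *pgd)
 *	{
 *		if (pgd_none(*pgd))
 *			return 1;
 *		if (unlikely(pgd_bad(*pgd))) {
 *			pgd_clear_bad(pgd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */
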
#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stub'ed out just as for p4d/pud
 * above. pmd folding is special and typically pmd_* macros refer to upper
 * level even when folded
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(ptep_get(ptep), entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address, ptep);
	}
	return changed;
}
#endif

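/*
 * A minimal caller sketch (illustrative only, not lifted from the fault
 * handlers): the return value tells the caller whether an MMU cache
 * update is needed, e.g.
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 */
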
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

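/*
 * Deposit and withdraw pair up around THP operations, always under the
 * pmd lock; roughly (a sketch, exact call sites vary):
 *
 *	ptl = pmd_lock(mm, pmdp);
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *	spin_unlock(ptl);
 */
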
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte format are same. So we could
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

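/*
 * pmdp_collapse_flush() is used when collapsing a pte table into a huge
 * page (khugepaged being the typical caller): the pmd did not map a huge
 * page, so the TLB entries to shoot down are the pte-level ones covering
 * the range, hence flush_tlb_range() rather than flush_pmd_tlb_range().
 */
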
/* arch define pte_free_defer in asm/pgalloc.h for its own implementation */
#ifndef pte_free_defer
static void pte_free_now(struct rcu_head *head)
{
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	pte_free(NULL /* mm not passed and not used */, (pgtable_t)page);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = pgtable;
	call_rcu(&page->rcu_head, pte_free_now);
}
#endif /* pte_free_defer */

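/*
 * Deferring the free over an RCU grace period matters because lockless
 * walkers such as __pte_offset_map() below inspect pte tables under
 * rcu_read_lock(): a table they may still be reading is not reused
 * immediately. A caller sketch (assumed, not from a specific call site):
 *
 *	pmdp_collapse_flush(vma, addr, pmd);
 *	...
 *	pte_free_defer(mm, pgtable);	instead of an immediate pte_free()
 */
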
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_GUP_GET_PXX_LOW_HIGH) && \
	(defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RCU))
/*
 * See the comment above ptep_get_lockless() in include/linux/pgtable.h:
 * the barriers in pmdp_get_lockless() cannot guarantee that the value in
 * pmd_high actually belongs with the value in pmd_low; but holding interrupts
 * off blocks the TLB flush between present updates, which guarantees that a
 * successful __pte_offset_map() points to a page from matched halves.
 */
static unsigned long pmdp_get_lockless_start(void)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	return irqflags;
}
static void pmdp_get_lockless_end(unsigned long irqflags)
{
	local_irq_restore(irqflags);
}
#else
static unsigned long pmdp_get_lockless_start(void) { return 0; }
static void pmdp_get_lockless_end(unsigned long irqflags) { }
#endif

pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
	unsigned long irqflags;
	pmd_t pmdval;

	rcu_read_lock();
	irqflags = pmdp_get_lockless_start();
	pmdval = pmdp_get_lockless(pmd);
	pmdp_get_lockless_end(irqflags);

	if (pmdvalp)
		*pmdvalp = pmdval;
	if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
		goto nomap;
	if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval)))
		goto nomap;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		goto nomap;
	}
	return __pte_map(&pmdval, addr);
nomap:
	rcu_read_unlock();
	return NULL;
}

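/*
 * Typical use is via the pte_offset_map()/pte_unmap() wrappers from
 * include/linux/pgtable.h; a sketch, with error handling elided:
 *
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	if (!pte)
 *		return;		(pmd was none, huge or unstable)
 *	... read ptes within the RCU read-side section ...
 *	pte_unmap(pte);
 */
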
pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (likely(pte))
		*ptlp = pte_lockptr(mm, &pmdval);
	return pte;
}

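/*
 * Note the contract difference: pte_offset_map_nolock() only reports
 * which spinlock would protect the pte table, leaving it to the caller
 * to take it (or not), whereas __pte_offset_map_lock() below takes the
 * lock and rechecks that the pmd still points to the same pte table
 * before returning.
 */
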
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	spinlock_t *ptl;
	pmd_t pmdval;
	pte_t *pte;
again:
	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (unlikely(!pte))
		return pte;
	ptl = pte_lockptr(mm, &pmdval);
	spin_lock(ptl);
	if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	goto again;
}

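/*
 * Caller sketch for the locked variant, via the pte_offset_map_lock()
 * wrapper (illustrative only; the surrounding fault or zap logic is
 * elided):
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (!pte)
 *		return 0;	(raced with collapse or zap: caller retries)
 *	... modify ptes under ptl ...
 *	pte_unmap_unlock(pte, ptl);
 */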