// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"
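
/*
 * Memory type used for writable shared mappings on VIVT-cache CPUs.
 * check_writebuffer_bugs() downgrades this from bufferable to fully
 * uncached if the write buffer turns out to alias physical addresses.
 */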
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * lock here.  Otherwise we are using shared mm->page_table_lock
 * which is already locked, thus cannot take it.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
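
/*
 * Walk the page tables down to the PTE covering @address in @vma and
 * hand it to do_adjust_pte() under the PTE lock.  Returns non-zero if
 * a present PTE was found there, so make_coherent() can count how many
 * other user mappings of the page are live.
 */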
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}
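
/*
 * Fix up every other user mapping of the page in this mm: walk all
 * shared (VM_MAYSHARE) VMAs of @mapping that cover the same file
 * offset and mark their PTEs uncacheable.  If any aliases were found,
 * the faulting PTE itself is downgraded as well.
 */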
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernels virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}
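
/*
 * Map the same page twice with bufferable attributes and check whether
 * a write through one mapping is immediately visible through the other.
 * If it is not (or the test cannot be run at all), fall back to mapping
 * shared pages fully uncached by switching shared_pte_mask.
 */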
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	pr_info("CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

	if (v) {
		pr_cont("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		pr_cont("ok\n");
	}
}