/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
#include <asm/asm-405.h>

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */
#define PTE_INDEX_SIZE PTE_SHIFT
#define PMD_INDEX_SIZE 0
#define PUD_INDEX_SIZE 0
#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif /* __ASSEMBLY__ */

#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table. The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 */

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
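
/*
 * Worked example of the geometry above -- illustrative only, assuming 4K
 * pages with 32-bit PTEs (so PTE_SHIFT is 10): PGDIR_SHIFT = 12 + 10 = 22,
 * PGDIR_SIZE is 4MB and PTRS_PER_PGD = 1 << (32 - 22) = 1024, i.e. the
 * one-page, 1024-entry pgdir of the normal case.  With 64-bit PTEs only
 * 512 of them fit in a page, so PGDIR_SHIFT drops to 21 and the first
 * level grows to 2048 entries (an 8KB pgdir), matching the second layout
 * described above.
 */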

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0

#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#include <asm/fixmap.h>

#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP PKMAP_BASE
#else
#define KVIRT_TOP FIXADDR_START
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP KVIRT_TOP
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START VMALLOC_START
#define IOREMAP_END VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START). For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system. This really does become a problem for machines with good amounts
 * of RAM.
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END ioremap_bot
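
/*
 * Worked example of the rounding above -- illustrative only, assuming no
 * PPC_PIN_SIZE and a hypothetical high_memory of 0xc8000000 (128MB of
 * lowmem): 0xc8000000 + VMALLOC_OFFSET = 0xc9000000 is already a multiple
 * of 16MB, so VMALLOC_START is 0xc9000000 and vmalloc space begins 16MB
 * above the top of the linear mapping.  VMALLOC_END is not a constant: it
 * tracks ioremap_bot, which moves down while early ioremaps are allocated
 * and becomes the fixed top of the vmalloc space at mem_init() time.
 */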

/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't override
 * it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT (PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
#endif
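
/*
 * For example (illustrative only): with the usual PAGE_SHIFT of 12 and no
 * pre-defined PTE_RPN_SHIFT, PTE_RPN_MASK is ~0xfff, so every bit above
 * the in-page offset belongs to the physical page number.  With 64-bit
 * PTEs that page number can extend past bit 31, which is why the mask has
 * to be built from a 1ULL so its high word is not truncated.
 */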

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~0, 0); } while (0)

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
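
/*
 * For example, pte_update(ptep, _PAGE_ACCESSED, 0) atomically clears the
 * accessed bit and returns the previous PTE value; that return value is
 * how ptep_test_and_clear_young() below reports whether the page had been
 * referenced.  (Illustrative note only, the definitions follow.)
 */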
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	unsigned long new = (old & ~clr) | set;

#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	p->pte = p->pte1 = p->pte2 = p->pte3 = new;
#else
	*p = __pte(new);
#endif
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%4)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(ptep, clr, set);
}
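
/*
 * Illustrative note on the construction above: pte_wrprotect(__pte(~0))
 * returns an all-ones PTE minus whatever bits pte_wrprotect() clears, so
 * negating it yields exactly the "clr" mask, while pte_wrprotect(__pte(0))
 * exposes any bits it sets.  On a platform where pte_wrprotect() simply
 * clears _PAGE_RW this reduces to pte_update(ptep, _PAGE_RW, 0).
 */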

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);

	pte_update(ptep, clr, set);

	flush_tlb_page(vma, address);
}
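
/*
 * Illustrative note: pte_set collects the bits that the mkyoung/mkdirty/
 * mkwrite/mkexec helpers turn on and ~pte_val(pte_clr) the bits they turn
 * off, so "set" and "clr" only ever carry those reference/permission bits
 * from "entry" into the PTE.  On a platform where the helpers simply set
 * _PAGE_ACCESSED, _PAGE_DIRTY, _PAGE_RW and _PAGE_EXEC, this becomes
 * pte_update(ptep, 0, pte_val(entry) & (those four bits)): flags can be
 * added but never removed here, and the PFN is never touched.
 */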

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page. The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler). On everything else the pmd contains the physical address
 * of the pte page. -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd) \
	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr) \
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
				  pte_index(addr))
#define pte_offset_map(dir, addr) \
	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte)
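
/*
 * Putting the walk together -- a minimal sketch, for illustration only
 * ("mm" and "addr" are hypothetical caller-supplied values; locking and
 * the highmem pte_offset_map()/pte_unmap() pairing are omitted):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, addr);
 *
 * Because the pud and pmd levels are folded (pgtable-nopmd.h above), the
 * pud/pmd steps are no-ops and the pgd entry located by pgd_offset() is
 * exactly what pte_offset_kernel() decodes via pmd_page_vaddr().
 */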

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit.
 */
#define __swp_type(entry) ((entry).val & 0x1f)
#define __swp_offset(entry) ((entry).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
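
/*
 * Worked example (illustrative only): a swap entry with type 3 and offset
 * 0x42 has val = 3 | (0x42 << 5) = 0x843 and is stored in the PTE as
 * 0x843 << 3 = 0x4218.  The << 3 keeps the low three PTE bits clear, so
 * _PAGE_PRESENT (which lives among them on these platforms) is never set
 * for a swap entry, and the 5-bit type plus the offset sit entirely above.
 */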

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */