/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

#define VMALLOC_SIZE		(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END		(PAGE_OFFSET - 1)
#define VMALLOC_START		(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
/* KASLR should leave at least 128MB for BPF after the kernel */
#define BPF_JIT_REGION_START	PFN_ALIGN((unsigned long)&_end)
#define BPF_JIT_REGION_END	(BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#endif
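/*
 * Illustrative 64-bit layout under the defines above (for orientation;
 * actual addresses depend on the image): KERNEL_LINK_ADDR works out to
 * 0xffffffff80000000 (end of the address space minus 2GB plus 1),
 * modules live in the window from PFN_ALIGN(_end) - 2GB up to
 * PFN_ALIGN(_start), i.e. just below the kernel image, and the BPF JIT
 * region is the first 128MB after PFN_ALIGN(_end).
 */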
/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	(VMALLOC_START - 1)
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
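/*
 * Worked example (Sv39, 4K pages, assuming STRUCT_PAGE_MAX_SHIFT == 6,
 * i.e. sizeof(struct page) <= 64 bytes): VMEMMAP_SHIFT = 39 - 12 - 1 + 6
 * = 32, so VMEMMAP_SIZE = 4GB, which holds one 64-byte struct page for
 * each of the 2^26 4K frames covering half of the 2^39-byte space.
 */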
/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)
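/*
 * With vmemmap laid out as one flat array, the generic
 * CONFIG_SPARSEMEM_VMEMMAP model turns pfn_to_page()/page_to_pfn()
 * into plain pointer arithmetic, effectively vmemmap + pfn.
 */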
#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE	PMD_SIZE
#else
#define FIXADDR_SIZE	PGDIR_SIZE
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET	SZ_8M
#endif
#ifndef __ASSEMBLY__

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= CONFIG_XIP_PHYS_ADDR && __a < CONFIG_XIP_PHYS_ADDR + SZ_16M) ? \
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET : \
		__a;							\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
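/*
 * Example with illustrative values (CONFIG_XIP_PHYS_ADDR = 0x20000000,
 * CONFIG_PHYS_RAM_BASE = 0x80000000): a writable object at 0x20900000
 * falls inside the 16MB XIP window, so XIP_FIXUP yields
 * 0x20900000 - 0x20000000 + 0x80000000 - SZ_8M = 0x80100000, i.e. its
 * copy in RAM. Addresses outside the window pass through unchanged.
 */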
#ifdef CONFIG_MMU

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
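/*
 * For example, with 4K pages and 8-byte entries (RV64),
 * PTRS_PER_PGD = PTRS_PER_PTE = 4096 / 8 = 512; on Sv39, where
 * TASK_SIZE = 2^38 and PGDIR_SIZE = 2^30, USER_PTRS_PER_PGD = 256,
 * i.e. user space owns the lower half of the page global directory.
 */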
/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC
#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)
/*
 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
 * change the properties of memory regions.
 */
#define _PAGE_IOREMAP	_PAGE_KERNEL

extern pgd_t swapper_pg_dir[];
/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
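/*
 * For example, a MAP_PRIVATE mapping requested with PROT_READ|PROT_WRITE
 * starts out as __P011 = PAGE_COPY, which is read-only: the first write
 * faults so the kernel can copy-on-write before granting _PAGE_WRITE.
 * The same request with MAP_SHARED gets __S011 = PAGE_SHARED, writable
 * from the start.
 */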
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd);
}

#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) &&
	       (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}
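/*
 * Per the RISC-V privileged spec, an entry with R, W and X all clear is
 * a pointer to the next level of the page table, and setting any of
 * them makes it a leaf. pmd_leaf() (and pte_huge() below) test exactly
 * that encoding.
 */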
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
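/*
 * Minimal usage sketch (illustrative only): build an entry mapping a
 * freshly allocated page with kernel RW permissions.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *
 * mk_pte() shifts the page's PFN into place and ORs in the protection
 * bits; pte_page(pte) recovers the struct page.
 */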
static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte)
		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}
/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}
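/*
 * Each accessor above returns a new pte_t by value, so they compose,
 * e.g. (illustrative only):
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 *
 * marks an entry both accessed and dirty without touching memory; the
 * caller still has to write it back with set_pte_at().
 */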
#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif
/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
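/*
 * _PAGE_CHG_MASK keeps the PFN and the bits that must survive a
 * permission change, so e.g. pte_modify(pte, PAGE_READ) downgrades an
 * entry to read-only while still pointing at the same page frame.
 */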
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}
void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}
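/*
 * Using an atomic xchg above (rather than a separate load and store of
 * zero) means a concurrent accessed/dirty-bit update cannot slip in
 * between: the value handed back is exactly the entry that was live in
 * the page table.
 */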
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}
/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	_PAGE_PROT_NONE (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
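/*
 * Worked example: __swp_entry(3, 0x10) yields (3 << 2) | (0x10 << 7)
 * = 0x80c. Bits 0 and 1 remain zero, so the resulting PTE is neither
 * pte_none() nor pte_present(), which is how it is recognized as a
 * swap entry.
 */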
/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE	FIXADDR_START
#endif
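/*
 * Checking the arithmetic for Sv39: PGDIR_SIZE = 2^30 and
 * PTRS_PER_PGD = 512, so TASK_SIZE = 2^30 * 2^9 / 2 = 2^38
 * = 0x4000000000, matching the comment above.
 */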
#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		0
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */
#define kern_addr_valid(addr)	(1) /* FIXME */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */

void setup_bootmem(void);
void paging_init(void);
void misc_mem_init(void);

#define FIRST_USER_ADDRESS	0

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */