/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H
#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>
#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif
/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))
/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE		((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
#define VMALLOC_SIZE		(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END		PAGE_OFFSET
#define VMALLOC_START		(PAGE_OFFSET - VMALLOC_SIZE)
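/*
 * Worked example (illustrative, Sv39 figures assumed): PGDIR_SIZE is 1GiB
 * and PTRS_PER_PGD is 512, so KERN_VIRT_SIZE is (512 / 2 * 1GiB) / 2 =
 * 128GiB for the direct mapping, and VMALLOC_SIZE is 64GiB, i.e. vmalloc
 * occupies [PAGE_OFFSET - 64GiB, PAGE_OFFSET).
 */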
#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif
/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#endif
/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#ifdef CONFIG_64BIT
#define VA_BITS		(pgtable_l5_enabled ? \
				57 : (pgtable_l4_enabled ? 48 : 39))
#else
#define VA_BITS		32
#endif
#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
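/*
 * Sizing sketch (assuming a 64-byte struct page, i.e. STRUCT_PAGE_MAX_SHIFT
 * of 6, and Sv39): VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32, so VMEMMAP_SIZE is
 * 4GiB, placed immediately below VMALLOC_START.
 */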
/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)
#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE	PMD_SIZE
#else
#define FIXADDR_SIZE	PGDIR_SIZE
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
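/*
 * Resulting 64-bit layout, low to high, as implied by the definitions above
 * (a summary, not an authoritative memory map): fixmap, PCI I/O window,
 * vmemmap, vmalloc, then the linear mapping at PAGE_OFFSET, with modules,
 * the BPF JIT region and the kernel image packed into the top 2GiB of the
 * address space.
 */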
#endif /* CONFIG_MMU */

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET		SZ_32M
#define XIP_OFFSET_MASK		(SZ_32M - 1)
#else
#define XIP_OFFSET		0
#endif
#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= CONFIG_XIP_PHYS_ADDR &&					\
	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?			\
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
		__a;							\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
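/*
 * XIP_FIXUP sketch with made-up addresses: assuming CONFIG_XIP_PHYS_ADDR
 * were 0x20000000 (flash) and CONFIG_PHYS_RAM_BASE were 0x80000000, an
 * address 0x22000000 inside the 2 * XIP_OFFSET (64M) window would become
 * 0x22000000 - 0x20000000 + 0x80000000 - SZ_32M = 0x80000000, while
 * addresses outside the window are passed through unchanged.
 */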
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __initdata;
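/*
 * Usage sketch (hypothetical call site, for illustration only):
 *
 *	phys_addr_t pa = pt_ops.alloc_pte(va);
 *	pte_t *ptep = pt_ops.get_pte_virt(pa);
 *
 * The hooks are swapped during early boot as the MMU and the linear
 * mapping become available, so callers always go through pt_ops rather
 * than a fixed allocator.
 */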
#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC
#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)
/*
 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
 * change the properties of memory regions.
 */
#define _PAGE_IOREMAP	_PAGE_KERNEL
extern pgd_t swapper_pg_dir[];
/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
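/*
 * Example (illustrative): a MAP_PRIVATE PROT_READ|PROT_WRITE mapping has
 * xwr = 011 and starts out as __P011 == PAGE_COPY, i.e. read-only until a
 * write fault copies the page, while the MAP_SHARED variant gets
 * __S011 == PAGE_SHARED with _PAGE_WRITE set.
 */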
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * when splitting a THP, split_huge_page() will temporarily clear
	 * the present bit; in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif
static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return __page_val_to_pfn(pte_val(pte));
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))
/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
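/*
 * Round-trip sketch (assuming the standard RISC-V PTE format, where
 * _PAGE_PFN_SHIFT is 10): pfn_pte(pfn, prot) places the PFN at bit 10 and
 * up and ORs in the protection bits, so pte_pfn(pfn_pte(pfn, prot))
 * returns the original pfn and pte_page() maps it back to a struct page.
 */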
static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}
/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif
/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}
void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	return set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bits      1 to 3:	_PAGE_LEAF (zero)
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bits      6 to 10:	swap type
 *	bits 11 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	6
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
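/*
 * Encoding sketch: __swp_entry(type, offset) evaluates to
 * (type << 6) | (offset << 11); e.g. type 1 with offset 0x1234 gives
 * (1 << 6) | (0x1234 << 11) = 0x91a040. The low bits stay clear, so the
 * resulting PTE is neither present nor _PAGE_PROT_NONE.
 */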
/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif
/*
 * Task size is:
 * -     0x9fc00000 (~2.5GB) for RV32.
 * -   0x4000000000 ( 256GB) for RV64 using SV39 mmu
 * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since the "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63-48 all equal to bit 47, or else a page-fault exception will occur."
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE	FIXADDR_START
#define TASK_SIZE_MIN	TASK_SIZE
#endif
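/*
 * Sanity check (Sv39, illustrative): PGDIR_SIZE is 1GiB and PTRS_PER_PGD
 * is 512, so TASK_SIZE = 1GiB * 512 / 2 = 0x4000000000 (256GB), matching
 * the figure quoted in the comment above.
 */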
#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		0
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */
#define kern_addr_valid(addr)   (1) /* FIXME */
extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;
extern bool pgtable_l4_enabled;

void paging_init(void);
void misc_mem_init(void);
/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */