/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
				 _page_cachable_default)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c.
 * The real values will be generated at runtime.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
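
/*
 * (Editor's note, a sketch of what the dummy table is for: these
 * entries fill the generic protection_map[] layout, where __PXXX
 * describes private copy-on-write mappings and __SXXX shared ones,
 * indexed by the read/write/execute bits of the mapping.)
 */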

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
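
/*
 * MIPS L1 caches can be virtually indexed, so several zero pages are
 * kept and zero_page_mask picks the one whose cache colour matches
 * vaddr; that is why __HAVE_COLOR_ZERO_PAGE is defined below.
 */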

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)
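
/*
 * htw_stop()/htw_start() bracket page table updates so the hardware
 * page table walker (HTW) never observes a half-written entry. The
 * per-CPU htw_seq counter makes the pair nestable: only the outermost
 * htw_stop() clears the PWEn bit in the CP0 PWCtl register, and only
 * the matching outermost htw_start() sets it again. A typical caller
 * follows this shape:
 *
 *	htw_stop();
 *	... rewrite one or more PTEs ...
 *	htw_start();
 */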

#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while(0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while(0)

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)
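
/*
 * A MIPS TLB entry maps an even/odd pair of adjacent virtual pages,
 * and the entry is only treated as global when both halves carry the
 * global bit. set_pte() and pte_clear() below therefore propagate
 * _PAGE_GLOBAL to the buddy PTE (the other half of the pair) whenever
 * the buddy is still none.
 */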

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		if (kernel_uses_llsc && R10000_LLSC_WAR) {
			__asm__ __volatile__ (
			"	.set	push				\n"
			"	.set	arch=r4000			\n"
			"	.set	noreorder			\n"
			"1:"	__LL	"%[tmp], %[buddy]		\n"
			"	bnez	%[tmp], 2f			\n"
			"	 or	%[tmp], %[tmp], %[global]	\n"
				__SC	"%[tmp], %[buddy]		\n"
			"	beqzl	%[tmp], 1b			\n"
			"	nop					\n"
			"2:						\n"
			"	.set	pop				\n"
			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
		} else if (kernel_uses_llsc) {
			__asm__ __volatile__ (
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	.set	noreorder			\n"
			"1:"	__LL	"%[tmp], %[buddy]		\n"
			"	bnez	%[tmp], 2f			\n"
			"	 or	%[tmp], %[tmp], %[global]	\n"
				__SC	"%[tmp], %[buddy]		\n"
			"	beqz	%[tmp], 1b			\n"
			"	nop					\n"
			"2:						\n"
			"	.set	pop				\n"
			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
		}
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif
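
/*
 * set_pte_at() only needs to write back cached user data (via
 * __update_cache()) when a present PTE changes to map a different
 * physical page; not-present entries, and updates that keep the same
 * pfn, skip the cache maintenance and go straight to the page table
 * write.
 */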

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	extern void __update_cache(unsigned long address, pte_t pte);

	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}

/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
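
/*
 * MIPS manages the TLB valid and dirty bits in software: the
 * _PAGE_SILENT_READ and _PAGE_SILENT_WRITE flags shadow them, and the
 * helpers below only set them once both the permission bit and the
 * accessed/modified state allow the access, so a first read or write
 * faults and lets the kernel mark the page young or dirty.
 */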

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
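
/*
 * No spare PTE bit is available for special mappings, so pte_special()
 * is always false and pte_mkspecial() is a no-op.
 */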
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
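
/*
 * update_mmu_cache() runs at the tail of a page fault: __update_tlb()
 * writes the freshly installed PTE into the TLB so the faulting
 * instruction can be restarted without taking a second refill
 * exception.
 */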

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)
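
/*
 * (Editor's note, hedged: with CONFIG_PHYS_ADDR_T_64BIT a 32-bit
 * kernel can address physical memory beyond 4GB, and the platform
 * provided fixup_bigphys_addr() may rewrite the target of an I/O
 * mapping into that extended range before the pfn is handed to
 * remap_pfn_range() below.)
 */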

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);

	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits; generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);
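
/*
 * A huge page pmd is told apart from a pointer to a pte page by the
 * _PAGE_HUGE bit, which the helpers below test and set.
 */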
static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;
	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;
	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;
	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;
	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * Uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
#endif

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */