/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <spaces.h>
#include <linux/const.h>
#include <linux/kernel.h>
#include <asm/mipsregs.h>

/*
 * PAGE_SHIFT determines the page size
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT	12
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PAGE_SHIFT	13
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT	14
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PAGE_SHIFT	15
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT	16
#endif
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
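
/*
 * Worked example (illustrative, not checked at build time): with
 * CONFIG_PAGE_SIZE_16KB,
 *
 *	PAGE_SHIFT == 14
 *	PAGE_SIZE  == 1UL << 14 == 0x4000
 *	PAGE_MASK  == ~0x3fff
 *
 * so (addr & PAGE_MASK) rounds an address down to its 16KB page
 * boundary and (addr & ~PAGE_MASK) yields the offset within the page.
 */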

/*
 * This is used for calculating the real page sizes
 * for FTLB or VTLB + FTLB configurations.
 */
static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
{
	switch (mmuextdef) {
	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
		if (PAGE_SIZE == (1 << 30))
			return 5;
		if (PAGE_SIZE == (1llu << 32))
			return 6;
		if (PAGE_SIZE > (256 << 10))
			return 7; /* reserved */
		/* fall through */
	case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
		return (PAGE_SHIFT - 10) / 2;
	default:
		panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
		      mmuextdef >> 14);
	}
}
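
/*
 * Worked example (illustrative): with 4KB base pages PAGE_SHIFT == 12,
 * so the VTLBSIZEEXT / fall-through case returns (12 - 10) / 2 == 1;
 * 16KB pages give (14 - 10) / 2 == 2. The divide-by-2 reflects that
 * each step in the Config4 FTLB page-size encoding quadruples the
 * page size, with the 1GB/4GB/reserved cases handled explicitly above.
 */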

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE	(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT	({BUILD_BUG(); 0; })
#define HPAGE_SIZE	({BUILD_BUG(); 0; })
#define HPAGE_MASK	({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER	({BUILD_BUG(); 0; })
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
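
/*
 * Worked example (illustrative): HPAGE_SHIFT == 2 * PAGE_SHIFT - 3
 * because one huge page covers a full page-table page worth of PTEs,
 * i.e. PAGE_SIZE / sizeof(pte_t) == 1 << (PAGE_SHIFT - 3) entries with
 * 8-byte PTEs. With 4KB base pages that yields HPAGE_SHIFT == 21
 * (2MB huge pages); with 16KB base pages, HPAGE_SHIFT == 25 (32MB).
 */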

#include <linux/pfn.h>

extern void build_clear_page(void);
extern void build_copy_page(void);

/*
 * ARCH_PFN_OFFSET is normally defined only for the FLATMEM
 * configuration, but it's used in our early mem init code for all
 * memory models, so always define it here.
 */
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
extern unsigned long ARCH_PFN_OFFSET;
# define ARCH_PFN_OFFSET	ARCH_PFN_OFFSET
#else
# define ARCH_PFN_OFFSET	PFN_UP(PHYS_OFFSET)
#endif
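
/*
 * Illustrative values (not checked at build time): with a fixed
 * PHYS_OFFSET of 0x80000000 and 4KB pages, ARCH_PFN_OFFSET is
 * PFN_UP(0x80000000) == 0x80000, so mem_map bookkeeping starts at the
 * first physical page of RAM rather than at PFN 0.
 */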

extern void clear_page(void * page);
extern void copy_page(void * to, void * from);

extern unsigned long shm_align_mask;

static inline unsigned long pages_do_alias(unsigned long addr1,
	unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}
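
/*
 * Example (illustrative): with shm_align_mask == 0x7fff (a 32KB
 * aliasing granule) two mappings of the same physical page at virtual
 * addresses 0x1000 and 0x3000 alias, since
 * (0x1000 ^ 0x3000) & 0x7fff == 0x2000 != 0, while 0x1000 and 0x9000
 * do not: their XOR differs only in bit 15, above the mask.
 */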

struct page;

static inline void clear_user_page(void *addr, unsigned long vaddr,
	struct page *page)
{
	extern void (*flush_data_cache_page)(unsigned long addr);

	clear_page(addr);
	/*
	 * If the kernel mapping of this page may alias the user mapping
	 * at vaddr in a virtually indexed cache, write the zeroed data
	 * back so the user-visible view is consistent.
	 */
	if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)addr);
}

struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#ifdef CONFIG_CPU_MIPS32
typedef struct { unsigned long pte_low, pte_high; } pte_t;
#define pte_val(x)	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x)	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#else
typedef struct { unsigned long long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) } )
#endif
#else
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) } )
#endif
typedef struct page *pgtable_t;

/*
 * Right now we don't support 4-level pagetables, so all pud-related
 * definitions come from <asm-generic/pgtable-nopud.h>.
 */

/*
 * Finally the top of the hierarchy, the pgd.
 */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) } )

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )
#define pte_pgprot(x)	__pgprot(pte_val(x) & ~_PFN_MASK)

/*
 * On R4000-style MMUs where a TLB entry maps an adjacent even / odd
 * pair of pages we only have a single global bit per pair of pages.
 * When writing to the TLB make sure we always have the bit set for
 * both pages or none. This macro is used to access the `buddy' of the
 * pte we're just working on.
 */
#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
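
/*
 * Example (illustrative): with sizeof(pte_t) == 8, a pte at address
 * 0x...a008 has its buddy at 0x...a000 and vice versa; XORing with
 * sizeof(pte_t) flips between the even and odd entry of the pair
 * without any conditional.
 */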

/*
 * __pa()/__va() should be used only during mem init.
 */
static inline unsigned long ___pa(unsigned long x)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * For MIPS64 the virtual address may either be in one of
		 * the compatibility segments ckseg0 or ckseg1, or it may
		 * be in XKPHYS.
		 */
		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
	}

	if (!IS_ENABLED(CONFIG_EVA)) {
		/*
		 * We're using the standard MIPS32 legacy memory map, i.e.
		 * the address x is going to be in kseg0 or kseg1. We can
		 * handle either case by masking out the desired bits using
		 * CPHYSADDR().
		 */
		return CPHYSADDR(x);
	}

	/*
	 * EVA is in use so the memory map could be anything, making it not
	 * safe to just mask out bits.
	 */
	return x - PAGE_OFFSET + PHYS_OFFSET;
}
#define __pa(x)		___pa((unsigned long)(x))
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
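
/*
 * Worked example (illustrative): on a legacy MIPS32 map with
 * PAGE_OFFSET == 0x80000000 and PHYS_OFFSET == 0,
 * __pa((void *)0x80001000) == 0x00001000 (kseg0 bits masked off) and
 * __va(0x00001000) == (void *)0x80001000, so __va(__pa(x)) == x for
 * any kseg0 address.
 */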

#include <asm/io.h>

/*
 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
 * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
 * discussion can be found in the lkml posting
 * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
 * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
 *
 * It is unclear if the miscompilations mentioned in
 * http://lkml.org/lkml/2010/8/8/138 also affect MIPS, so we keep this
 * workaround until GCC 3.x has been retired; only then can we apply
 * https://patchwork.linux-mips.org/patch/1541/
 */
#define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
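
/*
 * Example (illustrative): with 4KB pages, PAGE_OFFSET == 0x80000000
 * and PHYS_OFFSET == 0, pfn_to_kaddr(1) == __va(1 << 12) ==
 * (void *)0x80001000, the kernel virtual address of physical frame 1.
 */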

#ifdef CONFIG_FLATMEM

static inline int pfn_valid(unsigned long pfn)
{
	/* avoid <linux/mm.h> include hell */
	extern unsigned long max_mapnr;
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	return pfn >= pfn_offset && pfn < max_mapnr;
}

#elif defined(CONFIG_SPARSEMEM)

/* pfn_valid is defined in linux/mmzone.h */

#elif defined(CONFIG_NEED_MULTIPLE_NODES)

#define pfn_valid(pfn)						\
({								\
	unsigned long __pfn = (pfn);				\
	int __n = pfn_to_nid(__pfn);				\
	((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn +	\
			       NODE_DATA(__n)->node_spanned_pages)	\
		    : 0);					\
})

#endif

#define virt_to_pfn(kaddr)	PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))

extern bool __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr)						\
	__virt_addr_valid((const volatile void *) (kaddr))

#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | \
	 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_PAGE_H */