/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/memory.h
 *
 *  Copyright (C) 2000-2002 Russell King
 *  modification for nommu, Hyok S. Choi, 2004
 *
 *  Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>

#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
#include <asm/kasan_def.h>

/* PAGE_OFFSET - the virtual address of the start of the kernel image */
#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)

#ifdef CONFIG_MMU
/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#ifndef CONFIG_KASAN
#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#else
#define TASK_SIZE		(KASAN_SHADOW_START)
#endif
#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
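
/*
 * Worked example (assumed configuration, not mandated by this header): with
 * the common 3G/1G split (PAGE_OFFSET == 0xC0000000) and KASAN disabled,
 * TASK_SIZE is 0xBF000000 and TASK_UNMAPPED_BASE rounds up to 0x40000000.
 */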
/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26		(UL(1) << 26)

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbols relocation (2^24) */
#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
#endif
#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif

/*
 * The highmem pkmap virtual space shares the end of the module area.
 */
#ifdef CONFIG_HIGHMEM
#define MODULES_END		(PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END		(PAGE_OFFSET)
#endif
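
/*
 * Worked example (assumed configuration, for illustration only): with
 * PAGE_OFFSET == 0xC0000000, an ARM-mode kernel and no highmem, modules live
 * in 0xBF000000..0xC0000000, i.e. the 16MB immediately below the kernel,
 * keeping them within branching range of the kernel text.
 */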
/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical address
 * with its virtual address while keeping the offset from the base section.
 */
#define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
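
/*
 * Example (illustrative values only): an XIP kernel whose flash section base
 * is at physical 0x08000000 maps XIP_VIRT_ADDR(0x08044000) to
 * MODULES_VADDR + 0x44000, preserving the offset within the section.
 */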
#define FDT_FIXED_BASE		UL(0xff800000)
#define FDT_FIXED_SIZE		(2 * SECTION_SIZE)
#define FDT_VIRT_BASE(physbase)	((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
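
/*
 * Example (assumed values): with a 1MB SECTION_SIZE (classic page tables)
 * and a DTB passed at physical 0x4ff04000, FDT_VIRT_BASE() keeps the
 * in-section offset and yields 0xff800000 | 0x04000 == 0xff804000.
 */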
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Allow 16MB-aligned ioremap pages
 */
#define IOREMAP_MAX_ORDER	24
#endif

#define VECTORS_BASE		UL(0xffff0000)

#else /* CONFIG_MMU */
#ifndef __ASSEMBLY__
extern unsigned long setup_vectors_base(void);
extern unsigned long vectors_base;
#define VECTORS_BASE		vectors_base
#endif
/*
 * On !MMU the user task size is only limited by the end of the free RAM
 * region, so this define can hardly carry its original meaning.
 * Fortunately, there is no reference to it in noMMU mode, for now.
 */
#define TASK_SIZE		UL(0xffffffff)

#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE	UL(0x00000000)
#endif

#ifndef END_MEM
#define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
/*
 * The module can be at any place in ram in nommu mode.
 */
#define MODULES_END		(END_MEM)
#define MODULES_VADDR		PAGE_OFFSET

#define XIP_VIRT_ADDR(physaddr)  (physaddr)
#define FDT_VIRT_BASE(physbase)  ((void *)(physbase))

#endif /* !CONFIG_MMU */
#ifdef CONFIG_XIP_KERNEL
#define KERNEL_START		_sdata
#else
#define KERNEL_START		_stext
#endif
#define KERNEL_END		_end
/*
 * We fix the TCM memories (max 32 KiB ITCM resp. DTCM) at these
 * locations.
 */
#ifdef CONFIG_HAVE_TCM
#define ITCM_OFFSET	UL(0xfffe0000)
#define DTCM_OFFSET	UL(0xfffe8000)
#endif
/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
/*
 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
 * memory.  This is used for XIP and NoMMU kernels, and on platforms that don't
 * have CONFIG_ARM_PATCH_PHYS_VIRT.  Assembly code must always use
 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
 */
#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)

#ifndef __ASSEMBLY__
/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 *
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 */
#if defined(CONFIG_ARM_PATCH_PHYS_VIRT)

/*
 * Constants used to force the right instruction encodings and shifts
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24	0x81000000
#define __PV_BITS_23_16	0x810000
#define __PV_BITS_7_0	0x81

extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;

#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)
#ifndef CONFIG_THUMB2_KERNEL
#define __pv_stub(from,to,instr)			\
	__asm__("@ __pv_stub\n"				\
	"1:	" instr "	%0, %1, %2\n"		\
	"2:	" instr "	%0, %0, %3\n"		\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b - ., 2b - .\n"		\
	"	.popsection\n"				\
	: "=r" (to)					\
	: "r" (from), "I" (__PV_BITS_31_24),		\
	  "I"(__PV_BITS_23_16))

#define __pv_add_carry_stub(x, y)			\
	__asm__("@ __pv_add_carry_stub\n"		\
	"0:	movw	%R0, #0\n"			\
	"	adds	%Q0, %1, %R0, lsl #20\n"	\
	"1:	mov	%R0, %2\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	0b - ., 1b - .\n"		\
	"	.popsection\n"				\
	: "=&r" (y)					\
	: "r" (x), "I" (__PV_BITS_7_0)			\
	: "cc")
#else
#define __pv_stub(from,to,instr)			\
	__asm__("@ __pv_stub\n"				\
	"0:	movw	%0, #0\n"			\
	"	lsl	%0, #21\n"			\
	"	" instr " %0, %1, %0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	0b - .\n"			\
	"	.popsection\n"				\
	: "=&r" (to)					\
	: "r" (from))

#define __pv_add_carry_stub(x, y)			\
	__asm__("@ __pv_add_carry_stub\n"		\
	"0:	movw	%R0, #0\n"			\
	"	lsls	%R0, #21\n"			\
	"	adds	%Q0, %1, %R0\n"			\
	"1:	mvn	%R0, #0\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	0b - ., 1b - .\n"		\
	"	.popsection\n"				\
	: "=&r" (y)					\
	: "r" (x)					\
	: "cc")
#endif
static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
	phys_addr_t t;

	if (sizeof(phys_addr_t) == 4) {
		__pv_stub(x, t, "add");
	} else {
		__pv_add_carry_stub(x, t);
	}

	return t;
}
static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	unsigned long t;

	/*
	 * The 'unsigned long' cast discards the upper word when
	 * phys_addr_t is 64 bit, and makes sure that the inline
	 * assembler expression receives a 32 bit argument in place
	 * where a 32 bit 'r' operand is expected.
	 */
	__pv_stub((unsigned long) x, t, "sub");
	return t;
}
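
/*
 * Worked example (assumed layout, for illustration only): with PAGE_OFFSET
 * 0xC0000000 and RAM starting at physical 0x10000000, boot-time patching of
 * the stubs above makes the "add" variant translate virtual 0xC0123456 to
 * physical 0x10123456, and the "sub" variant does the reverse.
 */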
#else

#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET;
}
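
/*
 * Example (assumed values): with PAGE_OFFSET 0xC0000000 and PHYS_OFFSET
 * 0x80000000, __virt_to_phys_nodebug(0xC0100000) == 0x80100000 and
 * __phys_to_virt(0x80100000) == 0xC0100000.
 */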
#endif

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

#define __pa_symbol_nodebug(x)	__virt_to_phys_nodebug((x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif
/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)__phys_to_virt(x);
}
/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)
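
/*
 * Usage sketch (illustrative only): for a lowmem object such as
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 * __pa(buf) is the backing physical address and __va(__pa(buf)) == buf.
 * The round trip does not hold for vmalloc, highmem or ioremap addresses.
 */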
extern long long arch_phys_to_idmap_offset;

/*
 * These are for systems that have a hardware interconnect supported alias
 * of physical memory for idmap purposes.  Most cases should leave these
 * untouched.  Note: this can only return addresses less than 4GiB.
 */
static inline bool arm_has_idmap_alias(void)
{
	return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
}

#define IDMAP_INVALID_ADDR ((u32)~0)
static inline unsigned long phys_to_idmap(phys_addr_t addr)
{
	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
		addr += arch_phys_to_idmap_offset;
		if (addr > (u32)~0)
			addr = IDMAP_INVALID_ADDR;
	}

	return addr;
}
static inline phys_addr_t idmap_to_phys(unsigned long idmap)
{
	phys_addr_t addr = idmap;

	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
		addr -= arch_phys_to_idmap_offset;

	return addr;
}
static inline unsigned long __virt_to_idmap(unsigned long x)
{
	return phys_to_idmap(__virt_to_phys(x));
}

#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x))
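
/*
 * Example (platform specific, for illustration): TI Keystone 2 aliases its
 * DDR, which lives above 4GiB, in a 32-bit window and sets a negative
 * arch_phys_to_idmap_offset, so phys_to_idmap() folds a >4GiB physical
 * address into an address usable while the MMU is off.
 */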
/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *deprecated* (and that doesn't mean
 * use the __ prefixed forms instead.)  See dma-mapping.h.
 */
#ifndef __virt_to_bus
#define __virt_to_bus	__virt_to_phys
#define __bus_to_virt	__phys_to_virt
#define __pfn_to_bus(x)	__pfn_to_phys(x)
#define __bus_to_pfn(x)	__phys_to_pfn(x)
#endif
/*
 * Conversion between a struct page and a physical address.
 *
 *  page_to_pfn(page)	convert a struct page * to a PFN number
 *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
 *
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
					&& pfn_valid(virt_to_pfn(kaddr)))
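
/*
 * Put differently (assuming the usual layout): only lowmem linear-map
 * addresses between PAGE_OFFSET and high_memory with a valid backing pfn
 * pass virt_addr_valid(); user, module, vmalloc and ioremap addresses do not.
 */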
#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>

#endif