/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/memory.h
 *
 *  Copyright (C) 2000-2002 Russell King
 *  modification for nommu, Hyok S. Choi, 2004
 *
 *  Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>

#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif

/* PAGE_OFFSET - the virtual address of the start of the kernel image */
#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)

#ifdef CONFIG_MMU
/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
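/*
 * Worked example (illustrative, assuming the common default
 * CONFIG_PAGE_OFFSET = 0xC0000000, i.e. a 3G/1G split):
 *
 *	TASK_SIZE          = 0xC0000000 - 0x01000000 = 0xBF000000
 *	TASK_UNMAPPED_BASE = ALIGN(0xBF000000 / 3, SZ_16M)
 *	                   = ALIGN(0x3FAAAAAA, 0x01000000) = 0x40000000
 */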
/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26		(UL(1) << 26)

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbols relocation (2^24) */
#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
#endif

#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif
/*
 * The highmem pkmap virtual space shares the end of the module area.
 */
#ifdef CONFIG_HIGHMEM
#define MODULES_END		(PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END		(PAGE_OFFSET)
#endif
/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical address
 * with its virtual address while preserving the offset within the base section.
 */
#define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
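/*
 * Example (illustrative, with hypothetical values CONFIG_XIP_PHYS_ADDR =
 * 0x00080000 and MODULES_VADDR = 0xBF000000): the offset within the
 * 1 MiB section is preserved, so
 *
 *	XIP_VIRT_ADDR(0x00080000) = 0xBF000000 + (0x00080000 & 0x000fffff)
 *	                          = 0xBF080000
 */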
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Allow 16MB-aligned ioremap pages
 */
#define IOREMAP_MAX_ORDER	24
#endif

#define VECTORS_BASE		UL(0xffff0000)

#else /* CONFIG_MMU */

#ifndef __ASSEMBLY__
extern unsigned long setup_vectors_base(void);
extern unsigned long vectors_base;
#define VECTORS_BASE		vectors_base
#endif
/*
 * The user task size can in principle grow up to the end of free RAM, so a
 * fixed limit is hard to define and will probably never match the original
 * meaning of this define.
 * Fortunately, there is no reference to it in noMMU mode, for now.
 */
#define TASK_SIZE		UL(0xffffffff)
#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE	UL(0x00000000)
#endif

#ifndef END_MEM
#define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
/*
 * The module can be at any place in ram in nommu mode.
 */
#define MODULES_END		(END_MEM)
#define MODULES_VADDR		PAGE_OFFSET

#define XIP_VIRT_ADDR(physaddr)  (physaddr)

#endif /* !CONFIG_MMU */
#ifdef CONFIG_XIP_KERNEL
#define KERNEL_START		_sdata
#else
#define KERNEL_START		_stext
#endif
#define KERNEL_END		_end
/*
 * We fix the TCM memories (at most 32 KiB of ITCM and 32 KiB of DTCM)
 * at these locations.
 */
#ifdef CONFIG_HAVE_TCM
#define ITCM_OFFSET	UL(0xfffe0000)
#define DTCM_OFFSET	UL(0xfffe8000)
#endif
/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
/*
 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
 * memory.  This is used for XIP and NoMMU kernels, and on platforms that
 * don't have CONFIG_ARM_PATCH_PHYS_VIRT.  Assembly code must always use
 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
 */
#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
#ifdef CONFIG_XIP_KERNEL
/*
 * When referencing data in RAM from the XIP region in a relative manner
 * with the MMU off, we need the relative offset between the two physical
 * addresses.  The macro below achieves this, which is:
 *    __pa(v_data) - __xip_pa(v_text)
 */
#define PHYS_RELATIVE(v_data, v_text) \
	(((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \
	 ((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \
	  CONFIG_XIP_PHYS_ADDR))
#else
#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text))
#endif
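/*
 * Worked example for the XIP case (illustrative, hypothetical values:
 * PAGE_OFFSET = 0xC0000000, PLAT_PHYS_OFFSET = 0x80000000,
 * CONFIG_XIP_PHYS_ADDR = 0x00080000, XIP_VIRT_ADDR(0x00080000) = 0xBF080000):
 *
 *	PHYS_RELATIVE(0xC0100000, 0xBF090000)
 *	  = (0xC0100000 - 0xC0000000 + 0x80000000)	RAM data phys = 0x80100000
 *	  - (0xBF090000 - 0xBF080000 + 0x00080000)	XIP text phys = 0x00090000
 *	  = 0x80070000
 */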
#ifndef __ASSEMBLY__

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 *
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 */
#if defined(CONFIG_ARM_PATCH_PHYS_VIRT)

/*
 * Constants used to force the right instruction encodings and shifts
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24	0x81000000
#define __PV_BITS_7_0	0x81

extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;

#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)
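/*
 * Illustrative note (not part of the original header): after the boot-time
 * phys-to-virt patching has run, the __pv_stub()-based helpers below behave
 * like the plain linear translation, e.g. with a hypothetical
 * PHYS_OFFSET = 0x80000000 and PAGE_OFFSET = 0xC0000000:
 *
 *	__virt_to_phys_nodebug(0xC0123456)
 *		== 0xC0123456 - 0xC0000000 + 0x80000000 == 0x80123456
 *
 * except that the offset is encoded in the patched instructions themselves
 * instead of being loaded from a variable.
 */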
#ifndef CONFIG_THUMB2_KERNEL
#define __pv_stub(from,to,instr)			\
	__asm__("@ __pv_stub\n"				\
	"1:	" instr "	%0, %1, %2\n"		\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b - .\n"			\
	"	.popsection\n"				\
	: "=r" (to)					\
	: "r" (from), "I" (__PV_BITS_31_24))

#define __pv_add_carry_stub(x, y)			\
	__asm__("@ __pv_add_carry_stub\n"		\
	"0:	movw	%R0, #0\n"			\
	"	adds	%Q0, %1, %R0, lsl #24\n"	\
	"1:	mov	%R0, %2\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	0b - ., 1b - .\n"		\
	"	.popsection\n"				\
	: "=&r" (y)					\
	: "r" (x), "I" (__PV_BITS_7_0)			\
	: "cc")

#else
#define __pv_stub(from,to,instr)			\
	__asm__("@ __pv_stub\n"				\
	"0:	movw	%0, #0\n"			\
	"	lsl	%0, #24\n"			\
	"	" instr " %0, %1, %0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	0b - .\n"			\
	"	.popsection\n"				\
	: "=&r" (to)					\
	: "r" (from))

#define __pv_add_carry_stub(x, y)			\
	__asm__("@ __pv_add_carry_stub\n"		\
	"0:	movw	%R0, #0\n"			\
	"	lsls	%R0, #24\n"			\
	"	adds	%Q0, %1, %R0\n"			\
	"1:	mvn	%R0, #0\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	0b - ., 1b - .\n"		\
	"	.popsection\n"				\
	: "=&r" (y)					\
	: "r" (x)					\
	: "cc")
#endif
static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
	phys_addr_t t;

	if (sizeof(phys_addr_t) == 4) {
		__pv_stub(x, t, "add");
	} else {
		__pv_add_carry_stub(x, t);
	}
	return t;
}
static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	unsigned long t;

	/*
	 * The 'unsigned long' cast discards the upper word when
	 * phys_addr_t is 64 bit, and makes sure that the inline
	 * assembler expression receives a 32 bit argument in the
	 * place where a 32 bit 'r' operand is expected.
	 */
	__pv_stub((unsigned long) x, t, "sub");
	return t;
}

#else
#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET;
}

#endif
#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

#define __pa_symbol_nodebug(x)	__virt_to_phys_nodebug((x))
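/*
 * Worked example (illustrative, assuming PAGE_OFFSET = 0xC0000000,
 * PHYS_OFFSET = 0x80000000 and 4 KiB pages, so PHYS_PFN_OFFSET = 0x80000):
 *
 *	virt_to_pfn(0xC0123000) = ((0xC0123000 - 0xC0000000) >> 12) + 0x80000
 *	                        = 0x123 + 0x80000 = 0x80123
 */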
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif
/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: Drivers should NOT use these.  They are the wrong
 * translation to use for DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)__phys_to_virt(x);
}
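/*
 * Example (illustrative, not part of the original header): for a lowmem
 * buffer taken from the linear map the two helpers are inverses:
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	phys_addr_t pa = virt_to_phys(buf);
 *
 *	WARN_ON(phys_to_virt(pa) != buf);
 *
 * This does not hold for vmalloc()/ioremap() addresses, and drivers must
 * use the DMA API rather than these helpers for device-visible addresses.
 */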
/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)
extern long long arch_phys_to_idmap_offset;

/*
 * These are for systems that have a hardware interconnect-supported alias
 * of physical memory for idmap purposes.  Most cases should leave these
 * untouched.  Note: this can only return addresses less than 4GiB.
 */
static inline bool arm_has_idmap_alias(void)
{
	return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
}

#define IDMAP_INVALID_ADDR ((u32)~0)
static inline unsigned long phys_to_idmap(phys_addr_t addr)
{
	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
		addr += arch_phys_to_idmap_offset;
		if (addr > (u32)~0)
			addr = IDMAP_INVALID_ADDR;
	}

	return addr;
}
static inline phys_addr_t idmap_to_phys(unsigned long idmap)
{
	phys_addr_t addr = idmap;

	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
		addr -= arch_phys_to_idmap_offset;

	return addr;
}

static inline unsigned long __virt_to_idmap(unsigned long x)
{
	return phys_to_idmap(__virt_to_phys(x));
}

#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x))
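/*
 * Illustrative example (hypothetical platform values): a platform whose
 * interconnect aliases RAM at physical 0x80000000 down to 0x00000000 for
 * idmap purposes could set arch_phys_to_idmap_offset = -0x80000000ll,
 * giving:
 *
 *	phys_to_idmap(0x80123000) == 0x00123000
 *	idmap_to_phys(0x00123000) == 0x80123000
 *
 * On platforms that leave arch_phys_to_idmap_offset at 0, both helpers are
 * identity transformations.
 */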
/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *deprecated* (and that doesn't mean
 * use the __ prefixed forms instead.)  See dma-mapping.h.
 */
#ifndef __virt_to_bus
#define __virt_to_bus	__virt_to_phys
#define __bus_to_virt	__phys_to_virt
#define __pfn_to_bus(x)	__pfn_to_phys(x)
#define __bus_to_pfn(x)	__phys_to_pfn(x)
#endif
/*
 * Conversion between a struct page and a physical address.
 *
 * page_to_pfn(page)	convert a struct page * to a PFN number
 * pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
 *
 * virt_to_page(k)	convert a _valid_ virtual address to struct page *
 * virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
					&& pfn_valid(virt_to_pfn(kaddr)))
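/*
 * Example (illustrative): virt_addr_valid() is only true for lowmem
 * linear-map addresses that are backed by a struct page, e.g.
 *
 *	void *p = kmalloc(64, GFP_KERNEL);
 *	void *v = vmalloc(PAGE_SIZE);
 *
 *	virt_addr_valid(p);	true: p lies in [PAGE_OFFSET, high_memory)
 *	virt_addr_valid(v);	false: vmalloc addresses are outside the
 *				linear map, so virt_to_page() must not be
 *				used on them
 */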
#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>

#endif