// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
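
/*
 * Typical use from a driver looks like the sketch below. This is purely
 * illustrative: the physical base, size, register offsets and values are
 * made up for the example and are not part of this file.
 *
 *	void __iomem *regs = ioremap(0x40000000, SZ_4K);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(BIT(0), regs + 0x10);	(hypothetical control register)
 *	status = readl(regs + 0x14);	(hypothetical status register)
 *	iounmap(regs);
 */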

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is ascending order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

/*
 * Bring the vmalloc area PGD entries of @mm back in sync with init_mm,
 * retrying until the vmalloc sequence number is stable.
 */
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/*
		 * A 16MB supersection must be replicated across 16
		 * consecutive 1MB hardware entries: 8 Linux PMDs of
		 * two sections each.
		 */
		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
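
/*
 * For example (illustrative numbers only): remapping physical address
 * 0x10001004 with a size of 8 bytes page-aligns the mapping to cover
 * 0x10001000..0x10001fff, and the pointer handed back to the caller is
 * the new virtual base plus the 0x004 offset, so it addresses exactly
 * the bytes that were requested.
 */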

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
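
/*
 * The three variants above differ only in the memory type used for the
 * mapping: MT_DEVICE for strongly-ordered device registers,
 * MT_DEVICE_CACHED for cacheable mappings, and MT_DEVICE_WC for normal
 * uncached (write-combining) memory. An illustrative use of the latter,
 * with fb_phys and fb_size standing in for a real framebuffer resource:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_size);
 */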

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed, for example, for
 * reprogramming source clocks that would affect normal memory. Please
 * see CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
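
/*
 * A minimal sketch of the intended use, assuming a hypothetical SRAM
 * region (sram_phys/sram_size) and a position-independent function
 * my_reprogram_fn; fncpy() is the ARM helper from <asm/fncpy.h> for
 * copying code into such a mapping:
 *
 *	void __iomem *sram = __arm_ioremap_exec(sram_phys, sram_size, false);
 *	typeof(my_reprogram_fn) *fn;
 *
 *	fn = fncpy(sram, &my_reprogram_fn, sram_size);
 *	fn();
 */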

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
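
/*
 * Example (illustrative): a PCI host bridge driver mapping its first 64K
 * I/O window at I/O-space offset 0, where io_phys stands in for the
 * physical address of the bridge's I/O aperture:
 *
 *	err = pci_ioremap_io(0, io_phys);
 */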

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}