// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
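
/*
 * A minimal usage sketch (illustrative only; "res" and the register
 * offsets below are hypothetical and belong to the calling driver, not
 * to this file):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (regs) {
 *		u32 id = readl(regs + 0x00);	 hypothetical ID register
 *		writel(1, regs + 0x04);		 hypothetical control register
 *		iounmap(regs);
 *	}
 */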

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is ascending order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	/* keep static_vmlist sorted in ascending vaddr order */
	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
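
/*
 * The kernel's vmalloc/ioremap page tables live in init_mm.  Other mm's
 * carry a copy of the PGD entries covering that region and re-sync it
 * lazily: when init_mm.context.vmalloc_seq has moved on, the loop below
 * copies the VMALLOC_START..VMALLOC_END PGD slots from init_mm and
 * retries until the sequence number is stable.
 */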

void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pmd_t *pmdp = pmd_off_k(addr);

	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
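
/*
 * On the classic two-level ARM MMU each Linux pgd entry covers 2MB and is
 * backed by a pair of 1MB hardware section descriptors, which is why the
 * helpers below always write pmd[0] and pmd[1] together and advance the
 * address by PMD_SIZE.
 */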

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
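
/*
 * Supersections map 16MB at a time and can address beyond 32 bits of
 * physical memory: bits [35:32] of the physical address are carried in
 * bits [23:20] of the descriptor, which is what the pfn shift below
 * computes.  Each supersection must be replicated across 16 consecutive
 * first-level entries, i.e. 8 pmd pairs.
 */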

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
		    mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
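
/*
 * __arm_ioremap_pfn_caller() picks the cheapest mapping that fits: an
 * existing static mapping if one covers the range, then (on !SMP, !LPAE)
 * supersections or sections when everything is suitably aligned, and
 * finally ordinary page-table mappings via ioremap_page_range().
 */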

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);
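
/*
 * Variants of ioremap() that differ only in the memory type used for the
 * mapping: ioremap_cache() creates a cacheable mapping and ioremap_wc()
 * a write-combining one, which is what frame buffers typically want.
 */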

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
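
/*
 * Write-protect an ioremapped region: page-align the size and flip the
 * mapping to read-only, e.g. once code or data has been copied into it.
 */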

void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
{
	set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}
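
/*
 * Map a PCI I/O space resource into the fixed PCI_IOBASE window.  The
 * resource's start/end are offsets within that window, so the virtual
 * address is simply PCI_IOBASE + res->start.
 */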

int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

	if (!(res->flags & IORESOURCE_IO))
		return -EINVAL;

	if (res->end > IO_SPACE_LIMIT)
		return -EINVAL;

	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL(pci_remap_iospace);

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}