// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
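
/*
 * For illustration, a minimal driver-side sketch of the usage described
 * above (the base address, register offsets and error handling are
 * hypothetical, not taken from any real device): map the region once,
 * then access it only through the MMIO accessors rather than by
 * dereferencing the returned pointer.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap(0x40010000, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x04);
 *	status = readl(regs + 0x08);
 *	iounmap(regs);
 */
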
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is sorted in ascending address order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

/*
 * Copy init_mm's PGD entries covering the vmalloc area into the given mm,
 * retrying until the vmalloc sequence counter is stable so that a
 * concurrent update is not missed.
 */
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pmd_t *pmdp = pmd_off_k(addr);

	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
		    mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
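
/*
 * Illustrative sketch of the pfn/offset split mentioned above (the
 * physical address and length are hypothetical): a caller with a
 * non-page-aligned physical address passes the page frame number and the
 * offset within that page separately, and the offset is folded back into
 * the returned cookie.
 *
 *	phys_addr_t phys = 0x4001e100;
 *	void __iomem *base;
 *
 *	base = __arm_ioremap_pfn(__phys_to_pfn(phys), phys & ~PAGE_MASK,
 *				 0x100, MT_DEVICE);
 */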

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory, for example when reprogramming source
 * clocks that would affect normal memory. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
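
/*
 * Illustrative sketch only (the SRAM address, size and the copied routine
 * are hypothetical): platform code typically maps the memory executable
 * with __arm_ioremap_exec() and then copies a self-contained function
 * into it, e.g. with fncpy() from <asm/fncpy.h>, before calling it.
 *
 *	void __iomem *sram = __arm_ioremap_exec(0x40300000, SZ_32K, true);
 *	void (*sram_fn)(void);
 *
 *	sram_fn = fncpy((__force void *)sram, &my_reprogram_clocks, fn_size);
 *	sram_fn();
 */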

void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
{
	set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

	if (!(res->flags & IORESOURCE_IO))
		return -EINVAL;

	if (res->end > IO_SPACE_LIMIT)
		return -EINVAL;

	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL(pci_remap_iospace);

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}
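
/*
 * For reference, a sketch of the expected boot-time ordering (the call
 * sites are illustrative and may differ between kernel versions): the ARM
 * setup code initialises the fixmap before the early ioremap machinery,
 * and ordinary ioremap() only becomes usable later, once paging and the
 * vmalloc area have been set up.
 *
 *	early_fixmap_init();
 *	early_ioremap_init();
 *	...
 *	paging_init(mdesc);
 */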