// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
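/*
 * As a rough usage sketch (the physical address, size and register offsets
 * below are made-up values, not taken from any real device), a driver
 * would normally do something like:
 *
 *      void __iomem *regs = ioremap(0x40001000, SZ_4K);
 *      u32 status;
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + 0x10);
 *      status = readl(regs + 0x14);
 *      iounmap(regs);
 *
 * Plain pointer dereferences of the returned cookie are not allowed; only
 * the MMIO accessors mentioned above may be used on it.
 */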
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"


LIST_HEAD(static_vmlist);

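/*
 * Look up a boot-time static mapping that already covers the physical
 * range [paddr, paddr + size) with the requested memory type.  Returns
 * the matching static_vm, or NULL if no suitable static mapping exists.
 */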
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
                        size_t size, unsigned int mtype)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;
                if (!(vm->flags & VM_ARM_STATIC_MAPPING))
                        continue;
                if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
                        continue;

                if (vm->phys_addr > paddr ||
                        paddr + size - 1 > vm->phys_addr + vm->size - 1)
                        continue;

                return svm;
        }

        return NULL;
}

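/*
 * Find the static mapping whose virtual address range contains vaddr.
 * Returns NULL if vaddr does not fall within any static mapping.
 */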
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;

                /* static_vmlist is sorted in ascending address order */
                if (vm->addr > vaddr)
                        break;

                if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
                        return svm;
        }

        return NULL;
}

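/*
 * Register a static mapping during early boot: the embedded vm_struct is
 * handed to vm_area_add_early(), and the static_vm is then linked into
 * static_vmlist so that the list stays sorted by ascending virtual address.
 */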
void __init add_static_vm_early(struct static_vm *svm)
{
        struct static_vm *curr_svm;
        struct vm_struct *vm;
        void *vaddr;

        vm = &svm->vm;
        vm_area_add_early(vm);
        vaddr = vm->addr;

        list_for_each_entry(curr_svm, &static_vmlist, list) {
                vm = &curr_svm->vm;

                if (vm->addr > vaddr)
                        break;
        }
        list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

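/*
 * Bring an mm's view of the vmalloc area up to date: copy the first-level
 * (pgd) entries covering VMALLOC_START..VMALLOC_END from init_mm, retrying
 * if vmalloc_seq changed while the copy was in progress.
 */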
void __check_vmalloc_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.vmalloc_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.vmalloc_seq = seq;
        } while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
 * mask the size back to 1MB alignment or we will overflow in the loop below.
 */
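/*
 * Tear down the section mappings covering [virt, virt + size): clear each
 * pmd entry, free the second-level (pte) table if the entry pointed at
 * one, bump vmalloc_seq so other mms notice the change, and flush the
 * kernel TLB for the range.
 */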
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pmd_t *pmdp = pmd_off_k(addr);

        do {
                pmd_t pmd = *pmdp;

                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the vmalloc sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
                        init_mm.context.vmalloc_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }

                addr += PMD_SIZE;
                pmdp += 2;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
        if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
                __check_vmalloc_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}

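/*
 * Map the region with 1MB section entries.  On non-LPAE ARM a pgd entry
 * covers 2MB, so the hardware sections are written in pairs (pmd[0] and
 * pmd[1]) and the walk advances in PMD_SIZE steps.
 */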
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pmd_t *pmd = pmd_off_k(addr);

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        do {
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PMD_SIZE;
                pmd += 2;
        } while (addr < end);

        return 0;
}

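/*
 * Map the region with 16MB supersections.  Each supersection must be
 * replicated into all 16 of its 1MB slots (eight iterations writing a
 * pair per iteration), and bits [35:32] of the physical address are
 * encoded into descriptor bits [23:20] to allow >4GB addresses.
 */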
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pmd_t *pmd = pmd_off_k(addr);

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PMD_SIZE;
                        pmd += 2;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
#endif

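/*
 * Common ioremap worker: reject unaligned high mappings, reuse a covering
 * static mapping when one exists, refuse to map system RAM with mismatched
 * attributes, and otherwise allocate a VM area and map it with
 * supersections, sections or individual pages depending on alignment.
 */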
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;
        phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
                return NULL;
#endif

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        /*
         * Try to reuse one of the static mappings whenever possible.
         */
        if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
                struct static_vm *svm;

                svm = find_static_vm_paddr(paddr, size, mtype);
                if (svm) {
                        addr = (unsigned long)svm->vm.addr;
                        addr += paddr - svm->vm.phys_addr;
                        return (void __iomem *) (offset + addr);
                }
        }

        /*
         * Don't allow RAM to be mapped with mismatched attributes - this
         * causes problems with ARMv6+
         */
        if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
                return NULL;

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
        area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
               !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((paddr | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = ioremap_page_range(addr, addr + size, paddr,
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}

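/*
 * Split the physical address into a page frame number plus in-page offset
 * before handing off to __arm_ioremap_pfn_caller().  As a purely
 * illustrative example (the address is made up): with 4K pages, phys_addr
 * 0x40001004 becomes pfn 0x40001 and offset 0x4, so the returned cookie
 * points 4 bytes into the new page-aligned mapping.  Zero-sized and
 * wrapping requests are rejected here.
 */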
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        phys_addr_t last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

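/*
 * arch_ioremap_caller is deliberately a function pointer rather than a
 * direct call so that platform code can, in principle, substitute its own
 * ioremap implementation; by default it points at __arm_ioremap_caller().
 */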
void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
                                      unsigned int, void *) =
        __arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
        unsigned int mtype;

        if (cached)
                mtype = MT_MEMORY_RWX;
        else
                mtype = MT_MEMORY_RWX_NONCACHED;

        return __arm_ioremap_caller(phys_addr, size, mtype,
                        __builtin_return_address(0));
}

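/*
 * Write-back remapping of RAM (used, e.g., by the generic memremap()
 * code): the range is mapped with normal, cacheable attributes
 * (MT_MEMORY_RW) rather than device attributes.
 */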
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
        return (__force void *)arch_ioremap_caller(phys_addr, size,
                                                   MT_MEMORY_RW,
                                                   __builtin_return_address(0));
}

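/*
 * Undo an ioremap(): static mappings are permanent and left untouched;
 * section-mapped regions need their pmd entries removed by hand, because
 * the vmalloc code only knows about page-sized mappings; everything else
 * is handed straight to vunmap().
 */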
void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct static_vm *svm;

        /* If this is a static mapping, we must leave it alone */
        svm = find_static_vm_vaddr(addr);
        if (svm)
                return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        {
                struct vm_struct *vm;

                vm = find_vm_area(addr);

                /*
                 * If this is a section based mapping we need to handle it
                 * specially as the VM subsystem does not know how to handle
                 * such a beast.
                 */
                if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
                        unmap_area_sections((unsigned long)vm->addr, vm->size);
        }
#endif

        vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
        arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
        pci_ioremap_mem_type = mem_type;
}

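/*
 * Map one 64K window of PCI I/O space at PCI_IO_VIRT_BASE + offset, using
 * the memory type previously selected via pci_ioremap_set_mem_type()
 * (MT_DEVICE unless a platform overrides it).
 */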
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
        BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

        return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                                  PCI_IO_VIRT_BASE + offset + SZ_64K,
                                  phys_addr,
                                  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
        early_ioremap_setup();
}