arch/nios2/mm/ioremap.c
/*
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 * Copyright (C) 2004 Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

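/*
 * Fill one PTE table: map the portion of [address, address + size) that
 * lies within a single PMD to consecutive physical pages starting at
 * phys_addr, using a global, present, read/write protection plus the
 * caller-supplied flags.
 */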
static inline void remap_area_pte(pte_t *pte, unsigned long address,
				unsigned long size, unsigned long phys_addr,
				unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
				| _PAGE_WRITE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = PFN_DOWN(phys_addr);
	do {
		if (!pte_none(*pte)) {
			pr_err("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

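/*
 * Map the portion of [address, address + size) that lies within one PGD
 * entry: allocate PTE tables as needed and hand each PMD-sized chunk to
 * remap_area_pte().  Returns 0 on success or -ENOMEM if a PTE table
 * cannot be allocated.
 */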
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				unsigned long size, unsigned long phys_addr,
				unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr,
			flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

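/*
 * Top-level page-table walk for ioremap(): allocate the intermediate
 * p4d/pud/pmd levels in init_mm for [address, address + size), map the
 * range to phys_addr via remap_area_pmd(), and flush the caches before
 * and the TLB after the update.
 */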
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		p4d = p4d_alloc(&init_mm, dir, address);
		if (!p4d)
			break;
		pud = pud_alloc(&init_mm, p4d, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
			phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

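/*
 * Physical addresses below 0x20000000 can be reached through the fixed
 * uncached I/O window at CONFIG_NIOS2_IO_REGION_BASE and therefore need
 * no page-table mapping at all.
 */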
#define IS_MAPPABLE_UNCACHEABLE(addr) (addr < 0x20000000UL)

/*
 * Map some physical address range into the kernel address space.
 */
void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long offset;
	unsigned long last_addr;
	void *addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;

	if (!size || last_addr < phys_addr)
		return NULL;

	/* Don't allow anybody to remap normal RAM that we're using */
	if (phys_addr > PHYS_OFFSET && phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);
		for (page = virt_to_page(t_addr);
			page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Map uncached objects in the low part of address space to
	 * CONFIG_NIOS2_IO_REGION_BASE
	 */
	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
	    IS_MAPPABLE_UNCACHEABLE(last_addr))
		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);

	/* Mappings have to be page-aligned */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/* Ok, go for it */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, 0)) {
		vunmap(addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap);

/*
 * iounmap() unmaps the whole area, so be careful: it no longer frees the
 * underlying page tables.  That behaviour was never relied upon anyway
 * and might be added back later.
 */
void iounmap(void __iomem *addr)
{
	struct vm_struct *p;

	/* Addresses in the fixed uncached I/O window were never vmapped. */
	if ((unsigned long) addr > CONFIG_NIOS2_IO_REGION_BASE)
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		pr_err("iounmap: bad address %p\n", addr);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
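
/*
 * Illustrative sketch (not part of the original file): a typical
 * driver-style call sequence for the ioremap()/iounmap() pair exported
 * above.  EXAMPLE_DEV_PHYS, EXAMPLE_DEV_SIZE and the register offset are
 * hypothetical values chosen only to show the calling convention; the
 * readl()/writel() accessors and error handling follow the usual kernel
 * idiom.
 */
#if 0	/* example only, not built */
#define EXAMPLE_DEV_PHYS	0x10000000UL	/* hypothetical device base */
#define EXAMPLE_DEV_SIZE	0x1000UL	/* hypothetical region size */

static int example_use_ioremap(void)
{
	void __iomem *base;
	u32 id;

	/* Map the device registers; may hit the uncached fast path above. */
	base = ioremap(EXAMPLE_DEV_PHYS, EXAMPLE_DEV_SIZE);
	if (!base)
		return -ENOMEM;

	/* Access a (hypothetical) 32-bit register at offset 0x0. */
	id = readl(base);
	writel(id, base);

	/* Drop the mapping once the device is no longer needed. */
	iounmap(base);
	return 0;
}
#endif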