// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

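/*
 * "nohugeiomap" on the kernel command line forces every ioremap()
 * mapping down to PTE granularity, even where the architecture could
 * use huge entries.
 */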
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

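/*
 * Called once during boot to cache which page table levels the
 * architecture can ioremap with a single huge entry.  The
 * ioremap_*_enabled() helpers below report the cached results.
 */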
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_p4d_supported())
			ioremap_p4d_capable = 1;
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

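/*
 * Map [addr, end) to @phys_addr one PTE at a time.  The range must be
 * currently unmapped: finding a populated PTE here is a bug.
 */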
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

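/*
 * Try to cover [addr, end) with one huge PMD entry.  Fall back to PTEs
 * (return 0) unless huge PMDs are enabled, the range is exactly
 * PMD_SIZE, both addresses are PMD-aligned, and any stale page table
 * under the entry could be freed.
 */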
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pmd_enabled())
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

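/*
 * Populate the PMDs covering [addr, end): install a huge entry where
 * ioremap_try_huge_pmd() allows it, otherwise descend to
 * ioremap_pte_range().
 */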
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

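/*
 * As ioremap_try_huge_pmd(), but for a PUD-sized huge entry.
 */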
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pud_enabled())
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

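/*
 * Populate the PUDs covering [addr, end): try a huge entry first,
 * otherwise descend to ioremap_pmd_range().
 */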
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

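/*
 * As ioremap_try_huge_pmd(), but for a P4D-sized huge entry.
 */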
static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_p4d_enabled())
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

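/*
 * Populate the P4Ds covering [addr, end): try a huge entry first,
 * otherwise descend to ioremap_pud_range().
 */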
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

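/*
 * Map the physically contiguous range starting at @phys_addr into the
 * kernel virtual range [addr, end), using the largest page table
 * entries the architecture permits.  May sleep.  Levels flagged in
 * ARCH_PAGE_TABLE_SYNC_MASK are reported via arch_sync_kernel_mappings().
 */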
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
					&mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	flush_cache_vmap(start, end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

#ifdef CONFIG_GENERIC_IOREMAP
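/*
 * Generic ioremap: reserve a VM_IOREMAP area in vmalloc space and map
 * the physical range into it with the caller-supplied protection bits.
 * The sub-page offset of @addr is preserved in the returned cookie.
 */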
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

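/*
 * Undo a mapping created through the generic ioremap path.  The cookie
 * is masked back to its page base before being handed to vunmap().
 */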
void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
#endif /* CONFIG_GENERIC_IOREMAP */