kernel/dma/pool.c (Linux v5.9)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/cma.h>
#include <linux/debugfs.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

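/*
 * Example: booting with "coherent_pool=2M" makes each atomic pool start
 * at 2 MiB; memparse() accepts the usual K/M/G size suffixes.
 */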
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

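/*
 * Expose the current size of each pool as read-only debugfs files,
 * typically found under /sys/kernel/debug/dma_pools/ when debugfs is
 * mounted at the usual location.
 */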
static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);
	if (IS_ERR_OR_NULL(root))
		return;

	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

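/*
 * Credit @size freshly added bytes to the pool matching the zone flags
 * in @gfp; these totals back the debugfs files above.
 */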
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}

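/*
 * Report whether the default CMA area lies entirely within the zone
 * implied by @gfp, i.e. whether pages taken from it are safe to put
 * into the corresponding atomic pool.
 */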
static bool cma_in_zone(gfp_t gfp)
{
	unsigned long size;
	phys_addr_t end;
	struct cma *cma;

	cma = dev_get_cma_area(NULL);
	if (!cma)
		return false;

	size = cma_get_size(cma);
	if (!size)
		return false;

	/* CMA can't cross zone boundaries, see cma_activate_area() */
	end = cma_get_base(cma) + size - 1;
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
		return end <= DMA_BIT_MASK(zone_dma_bits);
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
		return end <= DMA_BIT_MASK(32);
	return true;
}

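/*
 * Grow @pool by up to @pool_size bytes: grab pages (from CMA when it
 * fits the zone, otherwise from the page allocator), retrying at
 * smaller orders on failure, make them coherent and unencrypted (the
 * latter matters under memory encryption schemes such as AMD SEV), and
 * hand the range to the genpool allocator.
 */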
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page = NULL;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_ORDER-1 */
	order = min(get_order(pool_size), MAX_ORDER-1);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);
		if (cma_in_zone(gfp))
			page = dma_alloc_from_contiguous(NULL, 1 << order,
							 order, false);
		if (!page)
			page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted; the pools do
	 * not shrink, so no re-encryption occurs in dma_direct_free_pages().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	/*
	 * Check the re-encryption result directly so that a successful
	 * set_memory_encrypted() does not clobber the gen_pool_add_virt()
	 * error in ret, which the caller must still see.
	 */
	if (WARN_ON_ONCE(set_memory_encrypted((unsigned long)page_to_virt(page),
					      1 << order))) {
		/* Decrypt succeeded but encrypt failed, purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
	__free_pages(page, order);
out:
	return ret;
}

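/*
 * Top up a drained pool: expanding by gen_pool_size() attempts to
 * double the pool each time it falls below the atomic_pool_size
 * watermark.
 */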
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

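/*
 * Deferred from allocation time so the refill runs in process context,
 * where blocking GFP_KERNEL page allocations are permitted, unlike in
 * the atomic contexts the pools serve.
 */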
static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

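/*
 * Create one pool with page granularity. The first-fit order-aligned
 * algorithm keeps each allocation aligned to its size rounded up to a
 * power of two, matching the alignment the DMA API promises callers.
 */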
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						      gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}

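/*
 * Runs at postcore_initcall() time, before device and subsystem
 * initcalls, so the pools exist by the time drivers start making
 * atomic DMA allocations.
 */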
static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not used on the command line, default the pool
	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
	 */
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
	if (!atomic_pool_kernel)
		ret = -ENOMEM;
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);

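/*
 * Pick the first pool to try for @gfp or, given the previously tried
 * pool in @prev, the next fallback. Falling back to a lower zone is
 * always safe: memory from ZONE_DMA also satisfies DMA32 and plain
 * kernel requests.
 */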
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
	if (prev == NULL) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
			return atomic_pool_dma32;
		if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
			return atomic_pool_dma;
		return atomic_pool_kernel;
	}
	if (prev == atomic_pool_kernel)
		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
	if (prev == atomic_pool_dma32)
		return atomic_pool_dma;
	return NULL;
}

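/*
 * Try a single pool: carve out @size bytes, let the optional
 * @phys_addr_ok callback veto memory the device cannot address (e.g.
 * memory outside its DMA mask), and kick the background refill once
 * available space drops below the watermark.
 */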
static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
		struct gen_pool *pool, void **cpu_addr,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	unsigned long addr;
	phys_addr_t phys;

	addr = gen_pool_alloc(pool, size);
	if (!addr)
		return NULL;

	phys = gen_pool_virt_to_phys(pool, addr);
	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
		gen_pool_free(pool, addr, size);
		return NULL;
	}

	if (gen_pool_avail(pool) < atomic_pool_size)
		schedule_work(&atomic_pool_work);

	*cpu_addr = (void *)addr;
	memset(*cpu_addr, 0, size);
	return pfn_to_page(__phys_to_pfn(phys));
}

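/*
 * Entry point for atomic coherent allocations: walk the candidate
 * pools in dma_guess_pool() order until one yields memory the device
 * can use.
 */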
struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t gfp,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	struct gen_pool *pool = NULL;
	struct page *page;

	while ((pool = dma_guess_pool(pool, gfp))) {
		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
					     phys_addr_ok);
		if (page)
			return page;
	}

	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
	return NULL;
}

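/*
 * Return true if @start was carved out of one of the atomic pools and
 * has been given back. Passing gfp == 0 to dma_guess_pool() makes the
 * walk visit every pool, since the caller cannot know which zone the
 * allocation came from.
 */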
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = NULL;

	while ((pool = dma_guess_pool(pool, 0))) {
		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
			continue;
		gen_pool_free(pool, (unsigned long)start, size);
		return true;
	}

	return false;
}