kernel/dma/pool.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/cma.h>
#include <linux/debugfs.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

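/*
 * Parse the "coherent_pool=" early parameter; memparse() accepts the usual
 * size suffixes, e.g. "coherent_pool=256K".
 */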
static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);

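/*
 * Expose the running size of each pool under debugfs, typically visible at
 * /sys/kernel/debug/dma_pools/ (the exact path depends on where debugfs is
 * mounted).
 */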
static void __init dma_atomic_pool_debugfs_init(void)
{
        struct dentry *root;

        root = debugfs_create_dir("dma_pools", NULL);
        if (IS_ERR_OR_NULL(root))
                return;

        debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
        debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
        debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

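/* Attribute a pool expansion to the zone selected by the GFP flags. */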
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
        if (gfp & __GFP_DMA)
                pool_size_dma += size;
        else if (gfp & __GFP_DMA32)
                pool_size_dma32 += size;
        else
                pool_size_kernel += size;
}

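/*
 * Check whether the default CMA area lies entirely within the zone implied
 * by @gfp, so that pages taken from it satisfy the zone's addressing limit.
 */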
static bool cma_in_zone(gfp_t gfp)
{
        unsigned long size;
        phys_addr_t end;
        struct cma *cma;

        cma = dev_get_cma_area(NULL);
        if (!cma)
                return false;

        size = cma_get_size(cma);
        if (!size)
                return false;

        /* CMA can't cross zone boundaries, see cma_activate_area() */
        end = cma_get_base(cma) + size - 1;
        if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
                return end <= DMA_BIT_MASK(zone_dma_bits);
        if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
                return end <= DMA_BIT_MASK(32);
        return true;
}

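/*
 * Grow @pool by up to @pool_size bytes: prefer CMA when it is usable for the
 * requested zone, fall back to the page allocator, and halve the order on
 * failure. The memory is made coherent for DMA, remapped when
 * CONFIG_DMA_DIRECT_REMAP is enabled, and decrypted before being added to
 * the genpool.
 */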
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
                              gfp_t gfp)
{
        unsigned int order;
        struct page *page = NULL;
        void *addr;
        int ret = -ENOMEM;

        /* Cannot allocate larger than MAX_ORDER-1 */
        order = min(get_order(pool_size), MAX_ORDER-1);

        do {
                pool_size = 1 << (PAGE_SHIFT + order);
                if (cma_in_zone(gfp))
                        page = dma_alloc_from_contiguous(NULL, 1 << order,
                                                         order, false);
                if (!page)
                        page = alloc_pages(gfp, order);
        } while (!page && order-- > 0);
        if (!page)
                goto out;

        arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
        addr = dma_common_contiguous_remap(page, pool_size,
                                           pgprot_dmacoherent(PAGE_KERNEL),
                                           __builtin_return_address(0));
        if (!addr)
                goto free_page;
#else
        addr = page_to_virt(page);
#endif
        /*
         * Memory in the atomic DMA pools must be unencrypted; the pools never
         * shrink, so no re-encryption occurs in dma_direct_free().
         */
        ret = set_memory_decrypted((unsigned long)page_to_virt(page),
                                   1 << order);
        if (ret)
                goto remove_mapping;
        ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
                                pool_size, NUMA_NO_NODE);
        if (ret)
                goto encrypt_mapping;

        dma_atomic_pool_size_add(gfp, pool_size);
        return 0;

encrypt_mapping:
        ret = set_memory_encrypted((unsigned long)page_to_virt(page),
                                   1 << order);
        if (WARN_ON_ONCE(ret)) {
                /* Decrypt succeeded but encrypt failed, purposely leak */
                goto out;
        }
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
        dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
        __free_pages(page, order);
out:
        return ret;
}

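/*
 * Top up a pool in the background once its available space drops below the
 * configured pool size.
 */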
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
        if (pool && gen_pool_avail(pool) < atomic_pool_size)
                atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

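/* Deferred work item: resize each pool that exists for the enabled zones. */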
static void atomic_pool_work_fn(struct work_struct *work)
{
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                atomic_pool_resize(atomic_pool_dma,
                                   GFP_KERNEL | GFP_DMA);
        if (IS_ENABLED(CONFIG_ZONE_DMA32))
                atomic_pool_resize(atomic_pool_dma32,
                                   GFP_KERNEL | GFP_DMA32);
        atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

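/*
 * Create a gen_pool with page-granular, order-aligned first-fit allocation
 * and seed it with an initial expansion from @gfp.
 */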
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
                                                      gfp_t gfp)
{
        struct gen_pool *pool;
        int ret;

        pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
        if (!pool)
                return NULL;

        gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

        ret = atomic_pool_expand(pool, pool_size, gfp);
        if (ret) {
                gen_pool_destroy(pool);
                pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
                       pool_size >> 10, &gfp);
                return NULL;
        }

        pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
                gen_pool_size(pool) >> 10, &gfp);
        return pool;
}

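/*
 * Set up the atomic pools at postcore_initcall time: one for plain
 * GFP_KERNEL and, where the corresponding zones are configured, one each
 * for GFP_DMA and GFP_DMA32.
 */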
static int __init dma_atomic_pool_init(void)
{
        int ret = 0;

        /*
         * If coherent_pool was not used on the command line, default the pool
         * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
         */
        if (!atomic_pool_size) {
                unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
                pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
                atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
        }
        INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

        atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
                                                    GFP_KERNEL);
        if (!atomic_pool_kernel)
                ret = -ENOMEM;
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
                atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
                                                GFP_KERNEL | GFP_DMA);
                if (!atomic_pool_dma)
                        ret = -ENOMEM;
        }
        if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
                atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
                                                GFP_KERNEL | GFP_DMA32);
                if (!atomic_pool_dma32)
                        ret = -ENOMEM;
        }

        dma_atomic_pool_debugfs_init();
        return ret;
}
postcore_initcall(dma_atomic_pool_init);

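/*
 * Iterate over the candidate pools: pass prev == NULL to get the pool
 * matching the zone requested in @gfp, then each previous pool to fall back
 * to progressively lower zones, whose memory satisfies any wider mask.
 */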
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
        if (prev == NULL) {
                if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
                        return atomic_pool_dma32;
                if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
                        return atomic_pool_dma;
                return atomic_pool_kernel;
        }
        if (prev == atomic_pool_kernel)
                return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
        if (prev == atomic_pool_dma32)
                return atomic_pool_dma;
        return NULL;
}

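/*
 * Try one pool: allocate @size bytes, let the caller veto the physical
 * address via @phys_addr_ok, and schedule background expansion when the
 * pool runs low. Returns the backing page and stores the (zeroed) CPU
 * address in @cpu_addr.
 */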
static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
                struct gen_pool *pool, void **cpu_addr,
                bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
        unsigned long addr;
        phys_addr_t phys;

        addr = gen_pool_alloc(pool, size);
        if (!addr)
                return NULL;

        phys = gen_pool_virt_to_phys(pool, addr);
        if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
                gen_pool_free(pool, addr, size);
                return NULL;
        }

        if (gen_pool_avail(pool) < atomic_pool_size)
                schedule_work(&atomic_pool_work);

        *cpu_addr = (void *)addr;
        memset(*cpu_addr, 0, size);
        return pfn_to_page(__phys_to_pfn(phys));
}

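/*
 * Allocate from the first pool compatible with @gfp that also passes the
 * optional @phys_addr_ok check; dma-direct, for instance, passes its
 * dma_coherent_ok() helper here to enforce device addressing limits.
 */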
struct page *dma_alloc_from_pool(struct device *dev, size_t size,
                void **cpu_addr, gfp_t gfp,
                bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
        struct gen_pool *pool = NULL;
        struct page *page;

        while ((pool = dma_guess_pool(pool, gfp))) {
                page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
                                             phys_addr_ok);
                if (page)
                        return page;
        }

        WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
        return NULL;
}

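/*
 * Return an allocation to whichever pool it came from. Returns false when
 * @start is not pool memory, so the caller can free it through its normal
 * path instead.
 */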
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
        struct gen_pool *pool = NULL;

        while ((pool = dma_guess_pool(pool, 0))) {
                if (!gen_pool_has_addr(pool, (unsigned long)start, size))
                        continue;
                gen_pool_free(pool, (unsigned long)start, size);
                return true;
        }

        return false;
}