// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be set with the coherent_pool= command-line parameter */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

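/*
 * Parse the "coherent_pool=<size>" early parameter; memparse() accepts
 * the usual K/M/G suffixes.
 */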
static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);

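/*
 * Expose the current size of each pool read-only in debugfs, typically
 * under /sys/kernel/debug/dma_pools/.
 */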
static void __init dma_atomic_pool_debugfs_init(void)
{
        struct dentry *root;

        root = debugfs_create_dir("dma_pools", NULL);
        if (IS_ERR_OR_NULL(root))
                return;

        debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
        debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
        debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

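/*
 * Account the bytes just added to whichever pool matches the allocation
 * flags; these counters back the debugfs files above.
 */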
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
        if (gfp & __GFP_DMA)
                pool_size_dma += size;
        else if (gfp & __GFP_DMA32)
                pool_size_dma32 += size;
        else
                pool_size_kernel += size;
}

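/*
 * Grow @pool by roughly @pool_size bytes: allocate the largest block of
 * pages available (retrying with smaller orders on failure), make it
 * coherent and unencrypted, then hand it to the genpool allocator.
 */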
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
                              gfp_t gfp)
{
        unsigned int order;
        struct page *page;
        void *addr;
        int ret = -ENOMEM;

        /* Cannot allocate larger than MAX_ORDER-1 */
        order = min(get_order(pool_size), MAX_ORDER-1);

        do {
                pool_size = 1 << (PAGE_SHIFT + order);
                page = alloc_pages(gfp, order);
        } while (!page && order-- > 0);
        if (!page)
                goto out;

        arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
        addr = dma_common_contiguous_remap(page, pool_size,
                                           pgprot_dmacoherent(PAGE_KERNEL),
                                           __builtin_return_address(0));
        if (!addr)
                goto free_page;
#else
        addr = page_to_virt(page);
#endif
        /*
         * Memory in the atomic DMA pools must be unencrypted; the pools do
         * not shrink, so no re-encryption occurs in dma_direct_free_pages().
         */
        ret = set_memory_decrypted((unsigned long)page_to_virt(page),
                                   1 << order);
        if (ret)
                goto remove_mapping;
        ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
                                pool_size, NUMA_NO_NODE);
        if (ret)
                goto encrypt_mapping;

        dma_atomic_pool_size_add(gfp, pool_size);
        return 0;

encrypt_mapping:
        ret = set_memory_encrypted((unsigned long)page_to_virt(page),
                                   1 << order);
        if (WARN_ON_ONCE(ret)) {
                /* Decrypt succeeded but encrypt failed; purposely leak */
                goto out;
        }
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
        dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
        __free_pages(page, order);
out:
        return ret;
}

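/*
 * Top up @pool, roughly doubling it, once its free space drops below the
 * configured atomic_pool_size.
 */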
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
        if (pool && gen_pool_avail(pool) < atomic_pool_size)
                atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

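/*
 * Deferred work scheduled from dma_alloc_from_pool() when a pool runs
 * low: expansion may block, so it cannot happen in the atomic allocation
 * path itself.
 */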
static void atomic_pool_work_fn(struct work_struct *work)
{
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                atomic_pool_resize(atomic_pool_dma,
                                   GFP_KERNEL | GFP_DMA);
        if (IS_ENABLED(CONFIG_ZONE_DMA32))
                atomic_pool_resize(atomic_pool_dma32,
                                   GFP_KERNEL | GFP_DMA32);
        atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

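/*
 * Create a genpool with page-sized granularity and order-aligned
 * first-fit placement, seeded with an initial @pool_size allocation;
 * returns NULL if either step fails.
 */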
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
                                                      gfp_t gfp)
{
        struct gen_pool *pool;
        int ret;

        pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
        if (!pool)
                return NULL;

        gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

        ret = atomic_pool_expand(pool, pool_size, gfp);
        if (ret) {
                gen_pool_destroy(pool);
                pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
                       pool_size >> 10, &gfp);
                return NULL;
        }

        pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
                gen_pool_size(pool) >> 10, &gfp);
        return pool;
}

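/*
 * Boot-time setup: the kernel-zone pool is always created, the DMA and
 * DMA32 pools only when the corresponding zone is configured.
 */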
static int __init dma_atomic_pool_init(void)
{
        int ret = 0;

        /*
         * If coherent_pool was not used on the command line, default the
         * pool sizes to 128 KiB per 1 GiB of memory: at least 128 KiB and
         * at most MAX_ORDER_NR_PAGES pages.
         */
        if (!atomic_pool_size) {
                unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
                pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
                atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
        }
        INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

        atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
                                                    GFP_KERNEL);
        if (!atomic_pool_kernel)
                ret = -ENOMEM;
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
                atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
                                                GFP_KERNEL | GFP_DMA);
                if (!atomic_pool_dma)
                        ret = -ENOMEM;
        }
        if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
                atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
                                                GFP_KERNEL | GFP_DMA32);
                if (!atomic_pool_dma32)
                        ret = -ENOMEM;
        }

        dma_atomic_pool_debugfs_init();
        return ret;
}
postcore_initcall(dma_atomic_pool_init);

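/*
 * Pick the pool matching the zone a fresh allocation for @dev would come
 * from, based on the device's coherent DMA mask.
 */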
static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
{
        u64 phys_mask;
        gfp_t gfp;

        gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                          &phys_mask);
        if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
                return atomic_pool_dma;
        if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
                return atomic_pool_dma32;
        return atomic_pool_kernel;
}

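/*
 * Fall back from a pool whose memory the device could not address to the
 * next more restrictive one: kernel -> DMA32 -> DMA -> none.
 */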
static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
{
        if (bad_pool == atomic_pool_kernel)
                return atomic_pool_dma32 ? : atomic_pool_dma;

        if (bad_pool == atomic_pool_dma32)
                return atomic_pool_dma;

        return NULL;
}

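/*
 * The first attempt (@bad_pool == NULL) uses the device's preferred
 * pool; retries step down to progressively safer pools.
 */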
static inline struct gen_pool *dma_guess_pool(struct device *dev,
                                              struct gen_pool *bad_pool)
{
        if (bad_pool)
                return dma_get_safer_pool(bad_pool);

        return dma_guess_pool_from_device(dev);
}

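/*
 * Allocate @size zeroed bytes for @dev from a suitable atomic pool.
 * Memory that turns out not to be addressable by the device is freed
 * again and a safer pool is tried; the background worker is kicked when
 * the chosen pool runs low.
 */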
void *dma_alloc_from_pool(struct device *dev, size_t size,
                          struct page **ret_page, gfp_t flags)
{
        struct gen_pool *pool = NULL;
        unsigned long val = 0;
        void *ptr = NULL;
        phys_addr_t phys;

        while (1) {
                pool = dma_guess_pool(dev, pool);
                if (!pool) {
                        WARN(1, "Failed to get suitable pool for %s\n",
                             dev_name(dev));
                        break;
                }

                val = gen_pool_alloc(pool, size);
                if (!val)
                        continue;

                phys = gen_pool_virt_to_phys(pool, val);
                if (dma_coherent_ok(dev, phys, size))
                        break;

                gen_pool_free(pool, val, size);
                val = 0;
        }

        if (val) {
                *ret_page = pfn_to_page(__phys_to_pfn(phys));
                ptr = (void *)val;
                memset(ptr, 0, size);

                if (gen_pool_avail(pool) < atomic_pool_size)
                        schedule_work(&atomic_pool_work);
        }

        return ptr;
}

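/*
 * Return @start/@size to whichever pool it came from; returns false if
 * the address does not belong to any of the atomic pools.
 */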
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
        struct gen_pool *pool = NULL;

        while (1) {
                pool = dma_guess_pool(dev, pool);
                if (!pool)
                        return false;

                if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
                        gen_pool_free(pool, (unsigned long)start, size);
                        return true;
                }
        }
}