// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

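/*
 * Parse the "coherent_pool=" early parameter. memparse() accepts the
 * usual size suffixes, so e.g. booting with "coherent_pool=2M" requests
 * 2 MiB per pool (an illustrative value, not a default from this file).
 */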
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);
	if (IS_ERR_OR_NULL(root))
		return;

	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

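/*
 * Account an expansion against the debugfs counter matching the zone the
 * allocation was made from (GFP_DMA, GFP_DMA32 or plain kernel memory).
 */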
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}

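/*
 * Grow @pool by up to @pool_size bytes, retrying with progressively
 * smaller orders on allocation failure. Pages come from CMA when a
 * global CMA area exists, otherwise from the page allocator; they are
 * then made coherent, remapped if CONFIG_DMA_DIRECT_REMAP is set,
 * unencrypted, and handed to genalloc.
 */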
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_ORDER-1 */
	order = min(get_order(pool_size), MAX_ORDER-1);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);

		if (dev_get_cma_area(NULL))
			page = dma_alloc_from_contiguous(NULL, 1 << order,
							 order, false);
		else
			page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted; the pools
	 * never shrink, so no re-encryption occurs in
	 * dma_direct_free_pages().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (WARN_ON_ONCE(ret)) {
		/* Decrypt succeeded but encrypt failed, purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
	if (!dma_release_from_contiguous(NULL, page, 1 << order))
		__free_pages(page, order);
out:
	return ret;
}

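/*
 * Top up @pool in the background: if fewer than atomic_pool_size bytes
 * remain available, try to add a chunk equal to the pool's current
 * total size, i.e. roughly double it.
 */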
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

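/* Workqueue callback: resize each pool whose zone was compiled in. */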
static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

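/*
 * Create one gen_pool and seed it with @pool_size bytes allocated with
 * @gfp. The first-fit order-align algorithm keeps each allocation
 * aligned to its size rounded up to a power of two.
 */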
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						      gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}

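/*
 * Set up the atomic pools at postcore_initcall time: pick a default size
 * when "coherent_pool=" was not given, then create one pool per
 * compiled-in zone. Failure of any pool makes the initcall return an
 * error, but the pools that did initialize remain usable.
 */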
static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not set on the command line, default each
	 * pool's size to 128 KiB per 1 GiB of memory, with a floor of
	 * 128 KiB and a ceiling of MAX_ORDER_NR_PAGES pages.
	 */
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
	if (!atomic_pool_kernel)
		ret = -ENOMEM;
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);

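/*
 * Map a device to the pool it must allocate from, based on the zone
 * that dma_direct_optimal_gfp_mask() derives from the device's
 * coherent DMA mask.
 */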
static inline struct gen_pool *dev_to_pool(struct device *dev)
{
	u64 phys_mask;
	gfp_t gfp;

	gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					  &phys_mask);
	if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
		return atomic_pool_dma;
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
		return atomic_pool_dma32;
	return atomic_pool_kernel;
}

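/* Check whether [start, start + size) lies entirely inside @dev's pool. */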
static bool dma_in_atomic_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = dev_to_pool(dev);

	if (unlikely(!pool))
		return false;
	return gen_pool_has_addr(pool, (unsigned long)start, size);
}

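/*
 * Allocate @size zeroed bytes for @dev from its atomic pool and return
 * the backing page in @ret_page. Meant for contexts that cannot sleep;
 * background expansion is scheduled whenever the pool runs low. A
 * minimal caller sketch (hypothetical, for illustration only):
 *
 *	struct page *page;
 *	void *vaddr = dma_alloc_from_pool(dev, PAGE_SIZE, &page, GFP_ATOMIC);
 *	if (!vaddr)
 *		return NULL;
 */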
void *dma_alloc_from_pool(struct device *dev, size_t size,
			  struct page **ret_page, gfp_t flags)
{
	struct gen_pool *pool = dev_to_pool(dev);
	unsigned long val;
	void *ptr = NULL;

	if (!pool) {
		WARN(1, "%pGg atomic pool not initialised!\n", &flags);
		return NULL;
	}

	val = gen_pool_alloc(pool, size);
	if (likely(val)) {
		phys_addr_t phys = gen_pool_virt_to_phys(pool, val);

		*ret_page = pfn_to_page(__phys_to_pfn(phys));
		ptr = (void *)val;
		memset(ptr, 0, size);
	} else {
		WARN_ONCE(1, "DMA coherent pool depleted, increase size "
			     "(recommended min coherent_pool=%zuK)\n",
			  gen_pool_size(pool) >> 9);
	}
	if (gen_pool_avail(pool) < atomic_pool_size)
		schedule_work(&atomic_pool_work);

	return ptr;
}

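/*
 * Return @start to @dev's pool if it came from there; returns false so
 * the caller can fall back to its normal free path otherwise. Expected
 * calling pattern (sketch; do_regular_free() is hypothetical):
 *
 *	if (!dma_free_from_pool(dev, vaddr, size))
 *		do_regular_free(dev, vaddr, size);
 */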
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = dev_to_pool(dev);

	if (!dma_in_atomic_pool(dev, start, size))
		return false;
	gen_pool_free(pool, (unsigned long)start, size);
	return true;
}