// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}

static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return -EINVAL;
	return ops->alloc(dmab, size);
}
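/*
 * Illustrative sketch (not part of this file): for the CONTINUOUS and
 * VMALLOC buffer types, a caller may encode GFP flags in place of a real
 * device pointer, which snd_mem_get_gfp_flags() above decodes again:
 *
 *	snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
 *			    (struct device *)(__force unsigned long)GFP_KERNEL,
 *			    size, dmab);
 *
 * Passing a NULL device instead selects the default GFP flags.
 */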
/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	int err;

	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = NULL;
	err = __snd_dma_alloc_pages(dmab, size);
	if (err < 0)
		return err;
	/* some allocators return 0 with a NULL area on failure */
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);
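/*
 * Usage sketch (hypothetical driver code, not part of this file):
 *
 *	struct snd_dma_buffer dmab;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
 *				  64 * 1024, &dmab);
 *	if (err < 0)
 *		return err;
 *	// use dmab.area (CPU address) and dmab.addr (DMA address)
 *	snd_dma_free_pages(&dmab);
 *
 * "card->dev" and the 64 KiB size are placeholders; the size is rounded up
 * to a page multiple and the result stored in dmab.bytes.
 */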
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
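/*
 * Walk-through with hypothetical numbers (4 KiB pages): if a 300 KiB
 * request fails with -ENOMEM, the size is halved to 150 KiB and rounded up
 * by get_order() to a power-of-two page count, i.e. 256 KiB; further
 * failures retry with 128 KiB, 64 KiB, and so on, until an allocation
 * succeeds (the final size is then found in dmab->bytes) or the size
 * reaches PAGE_SIZE and -ENOMEM is returned.
 */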
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);
/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}
/**
 * snd_devm_alloc_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically on device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device
 * pointer, hence it can't work with the SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC types.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_pages(struct device *dev, int type, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_pages(type, dev, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);
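/*
 * Usage sketch (hypothetical): with devres there is no explicit free call;
 * the buffer is released automatically when "dev" is unbound:
 *
 *	struct snd_dma_buffer *dmab;
 *
 *	dmab = snd_devm_alloc_pages(dev, SNDRV_DMA_TYPE_DEV, 32 * 1024);
 *	if (!dmab)
 *		return -ENOMEM;
 *	// dmab->area and dmab->addr stay valid until device removal
 */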
/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
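/*
 * Usage sketch (hypothetical): a PCM driver's .mmap callback can delegate
 * the whole job to the type-specific handler:
 *
 *	static int example_mmap(struct snd_pcm_substream *substream,
 *				struct vm_area_struct *area)
 *	{
 *		return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream),
 *					   area);
 *	}
 *
 * example_mmap() is a made-up name; for buffer types without an mmap
 * handler the call returns -ENOENT.
 */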
/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);
/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);
/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
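/*
 * Illustrative sketch (hypothetical helper name): a driver programming a
 * hardware scatter-gather list can walk the buffer in physically
 * contiguous chunks:
 *
 *	unsigned int ofs = 0, rest = dmab->bytes;
 *
 *	while (rest > 0) {
 *		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
 *
 *		program_sg_entry(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *		rest -= chunk;
 *	}
 *
 * program_sg_entry() stands in for the driver's own hardware access.
 */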
/*
 * Continuous pages allocator
 */
static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);

	dmab->area = alloc_pages_exact(size, gfp);
	return 0;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}
static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       page_to_pfn(virt_to_page(dmab->area)),
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};
/*
 * VMALLOC allocator
 */
static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	dmab->area = __vmalloc(size, gfp);
	return 0;
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
		offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	/* vmalloc pages are only virtually contiguous;
	 * clamp the chunk to the page containing ofs */
	ofs %= PAGE_SIZE;
	size += ofs;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;
	return size - ofs;
}
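/*
 * Worked example (hypothetical numbers, 4 KiB pages): for ofs = 4000 and
 * size = 500, the chunk is clamped at the page boundary and 96 bytes
 * (4096 - 4000) are returned; the caller continues on the next page.
 */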
static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
						      PAGE_SIZE);
		if (dmab->area)
			return 0;
	}

	/* Internal memory might be too small or already exhausted,
	 * so if the allocation fails, fall back to a regular device
	 * allocation.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}
static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}
static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */
/*
 * Coherent device pages allocator
 */
static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp_flags;

	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
					gfp_flags);
#ifdef CONFIG_X86
	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wc((unsigned long)dmab->area,
			      PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
	return 0;
}
static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}
static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};
#endif /* CONFIG_HAS_DMA */
/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops,
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops,
#endif /* CONFIG_SND_DMA_SGBUF */
};
static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}