// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);

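/*
 * Illustrative usage sketch (not part of this file): a driver typically
 * allocates its DMA buffer once and releases it with snd_dma_free_pages().
 * The device pointer ("card->dev") and size below are placeholders.
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, card->dev,
 *				      DMA_BIDIRECTIONAL, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	// program buf.addr (DMA address), access buf.area (CPU address)
 *	snd_dma_free_pages(&buf);
 */
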
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);

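/*
 * Illustrative sketch (assumed caller code): after a successful fallback
 * allocation the caller must look at dmab.bytes, since it may be smaller
 * than the size originally requested.
 *
 *	if (!snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, card->dev,
 *					  requested, &buf))
 *		pr_debug("got %zu of %zu bytes\n", buf.bytes, requested);
 */
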
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC type.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);

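/*
 * Illustrative sketch (assumed driver probe code): with the devres variant
 * there is no explicit free; the buffer goes away when the device is
 * unbound.  "pdev" is a placeholder platform device.
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_dir_pages(&pdev->dev, SNDRV_DMA_TYPE_DEV,
 *				       DMA_TO_DEVICE, 32 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 */
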
/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);

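/*
 * Illustrative sketch (assumed PCM callback): a driver that keeps its ring
 * buffer in a snd_dma_buffer can forward the PCM mmap request directly;
 * snd_pcm_get_dma_buf() here is assumed to return the substream's buffer.
 *
 *	static int my_pcm_mmap(struct snd_pcm_substream *substream,
 *			       struct vm_area_struct *area)
 *	{
 *		return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
 *	}
 */
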
#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */

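/*
 * Illustrative sketch (assumed capture path): sync toward the CPU before
 * reading data the device has just written, and toward the device after the
 * CPU has filled data for it.
 *
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 *	// read captured samples from dmab->area
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 */
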
/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);

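/*
 * Illustrative sketch (assumed caller code): a driver programming a hardware
 * scatter-gather list can walk the buffer in physically contiguous chunks
 * with snd_sgbuf_get_chunk_size() and snd_sgbuf_get_addr().  "write_sg_entry"
 * is a placeholder for the device-specific descriptor setup.
 *
 *	unsigned int ofs = 0, rest = dmab->bytes, chunk;
 *
 *	while (rest) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
 *		write_sg_entry(chip, snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *		rest -= chunk;
 *	}
 */
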
/*
 * Continuous pages allocator
 */
static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
	void *p = alloc_pages_exact(size, gfp);

	if (p)
		dmab->addr = page_to_phys(virt_to_page(p));
	return p;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	return __vmalloc(size, gfp);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

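/*
 * Illustrative note (numbers are made up): with PAGE_SIZE = 4096, if the
 * pages backing offsets 0x0000-0x1fff happen to be physically contiguous but
 * the page at 0x2000 is not, then
 *
 *	snd_dma_vmalloc_get_chunk_size(dmab, 0x0800, 0x3000)
 *
 * returns 0x1800: the scan starts at the page boundary below the offset
 * (0x0000), finds the break at 0x2000, and reports the contiguous span that
 * remains from ofs (0x2000 - 0x0800).
 */
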
#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might have limited size and not enough space,
	 * so if we fail to malloc, try to fetch memory traditionally.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_COMP |    /* compound page lets parts be mapped */ \
	 __GFP_NORETRY | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

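/*
 * Illustrative sketch (assumed driver code): write-combined buffers suit
 * playback data that the CPU only writes and the device only reads, since
 * CPU reads through a WC mapping are uncached and slow.
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_WC, card->dev,
 *				      DMA_TO_DEVICE, bytes, &buf);
 */
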
#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
	if (!sgt) {
#ifdef CONFIG_SND_DMA_SGBUF
		if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
			dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
		else
			dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
		return snd_dma_sg_fallback_alloc(dmab, size);
#else
		return NULL;
#endif
	}

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p)
		dmab->private_data = sgt;
	else
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = snd_dma_noncontig_alloc(dmab, size);
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	if (!p)
		return NULL;
	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
		return p;
	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wc(sg_wc_address(&iter), 1);
	return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wb(sg_wc_address(&iter), 1);
	snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
			      struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
	.alloc = snd_dma_sg_wc_alloc,
	.free = snd_dma_sg_wc_free,
	.mmap = snd_dma_sg_wc_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	size_t count;
	struct page **pages;
	dma_addr_t *addrs;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	size_t i;

	if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wb(sgbuf->pages, sgbuf->count);
	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
		dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
				  page_address(sgbuf->pages[i]),
				  sgbuf->addrs[i]);
	kvfree(sgbuf->pages);
	kvfree(sgbuf->addrs);
	kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pages;
	size_t i, count;
	void *p;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto error;
	sgbuf->pages = pages;
	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->addrs)
		goto error;

	for (i = 0; i < count; sgbuf->count++, i++) {
		p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
				       &sgbuf->addrs[i], DEFAULT_GFP);
		if (!p)
			goto error;
		sgbuf->pages[i] = virt_to_page(p);
	}

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wc(pages, count);
	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error;
	dmab->private_data = sgbuf;
	return p;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	vunmap(dmab->area);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
	.alloc = snd_dma_sg_fallback_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	/* reuse vmalloc helpers */
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}