return (__force gfp_t)(unsigned long)dmab->dev.dev;
}
-static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
+static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
if (WARN_ON_ONCE(!ops || !ops->alloc))
- return -EINVAL;
+ return NULL;
return ops->alloc(dmab, size);
}
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
- int err;
-
if (WARN_ON(!size))
return -ENXIO;
if (WARN_ON(!dmab))
return -ENXIO;
dmab->dev.type = type;
dmab->dev.dev = device;
dmab->bytes = 0;
- dmab->area = NULL;
dmab->addr = 0;
dmab->private_data = NULL;
- err = __snd_dma_alloc_pages(dmab, size);
- if (err < 0)
- return err;
+ dmab->area = __snd_dma_alloc_pages(dmab, size);
if (!dmab->area)
return -ENOMEM;
dmab->bytes = size;
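The caller-visible contract of snd_dma_alloc_pages() is unchanged by this conversion; only the internal hand-off of the CPU address moves from a side effect on dmab->area to a return value. A minimal, hypothetical caller (assuming a valid struct device *dev; not part of this patch) still looks like this:

static int example_alloc_dma(struct device *dev, struct snd_dma_buffer *buf)
{
        int err;

        /* 4 KiB of coherent DMA memory; 0 or a negative errno, as before */
        err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, 4096, buf);
        if (err < 0)
                return err;
        /* buf->area holds the CPU address, buf->addr the DMA address */
        return 0;
}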
/*
* Continuous pages allocator
*/
-static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
- dmab->area = alloc_pages_exact(size, gfp);
- return 0;
+ return alloc_pages_exact(size, gfp);
}
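Note that snd_mem_get_gfp_flags() works here because, for the CONTINUOUS (and VMALLOC) buffer types, dmab->dev.dev does not point to a device at all but carries GFP flags, as decoded at the top of this excerpt. A hypothetical caller would encode them with the existing snd_dma_continuous_data() helper (reusing err and buf from the sketch above):

        /* continuous pages with GFP_KERNEL; no real device is involved */
        err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
                                  snd_dma_continuous_data(GFP_KERNEL),
                                  4096, buf);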
static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
/*
* VMALLOC allocator
*/
-static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);
- dmab->area = __vmalloc(size, gfp);
- return 0;
+ return __vmalloc(size, gfp);
}
static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
* IRAM allocator
*/
#ifdef CONFIG_GENERIC_ALLOCATOR
-static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
struct device *dev = dmab->dev.dev;
struct gen_pool *pool;
+ void *p;
if (dev->of_node) {
pool = of_gen_pool_get(dev->of_node, "iram", 0);
/* Assign the pool into private_data field */
dmab->private_data = pool;
- dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
- PAGE_SIZE);
- if (dmab->area)
- return 0;
+ p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
+ if (p)
+ return p;
}
/* Internal memory might have limited size and no enough space,
/*
* Coherent device pages allocator
*/
-static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
gfp_t gfp_flags;
+ void *p;
gfp_flags = GFP_KERNEL
| __GFP_COMP /* compound page lets parts be mapped */
| __GFP_NORETRY /* don't trigger OOM-killer */
| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
- dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
- gfp_flags);
+ p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, gfp_flags);
#ifdef CONFIG_X86
- if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
- set_memory_wc((unsigned long)dmab->area,
- PAGE_ALIGN(size) >> PAGE_SHIFT);
+ if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+ set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
- return 0;
+ return p;
}
static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
#define MAX_ALLOC_PAGES 32
-static int snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
struct snd_sg_buf *sgbuf;
unsigned int i, pages, chunk, maxpages;
struct page **pgtable;
int type = SNDRV_DMA_TYPE_DEV;
pgprot_t prot = PAGE_KERNEL;
+ void *area;
dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
if (!sgbuf)
- return -ENOMEM;
+ return NULL;
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
prot = pgprot_noncached(PAGE_KERNEL);
#endif
}
sgbuf->size = size;
- dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
- if (! dmab->area)
+ area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
+ if (!area)
goto _failed;
- return 0;
+ return area;
_failed:
snd_dma_sg_free(dmab); /* free the table */
- return -ENOMEM;
+ return NULL;
}
static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,