// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
14 struct page **dma_common_find_pages(void *cpu_addr)
16 struct vm_struct *area = find_vm_area(cpu_addr);
18 if (!area || area->flags != VM_DMA_COHERENT)
23 static struct vm_struct *__dma_common_pages_remap(struct page **pages,
24 size_t size, pgprot_t prot, const void *caller)
26 struct vm_struct *area;
28 area = get_vm_area_caller(size, VM_DMA_COHERENT, caller);
32 if (map_vm_area(area, prot, pages)) {
41 * Remaps an array of PAGE_SIZE pages into another vm_area.
42 * Cannot be used in non-sleeping contexts
44 void *dma_common_pages_remap(struct page **pages, size_t size,
45 pgprot_t prot, const void *caller)
47 struct vm_struct *area;
49 area = __dma_common_pages_remap(pages, size, prot, caller);
59 * Remaps an allocated contiguous region into another vm_area.
60 * Cannot be used in non-sleeping contexts
62 void *dma_common_contiguous_remap(struct page *page, size_t size,
63 pgprot_t prot, const void *caller)
67 struct vm_struct *area;
69 pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
73 for (i = 0; i < (size >> PAGE_SHIFT); i++)
74 pages[i] = nth_page(page, i);
76 area = __dma_common_pages_remap(pages, size, prot, caller);
86 * Unmaps a range previously mapped by dma_common_*_remap
88 void dma_common_free_remap(void *cpu_addr, size_t size)
90 struct vm_struct *area = find_vm_area(cpu_addr);
92 if (!area || area->flags != VM_DMA_COHERENT) {
93 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
97 unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
101 #ifdef CONFIG_DMA_DIRECT_REMAP
102 static struct gen_pool *atomic_pool __ro_after_init;
104 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
105 static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
107 static int __init early_coherent_pool(char *p)
109 atomic_pool_size = memparse(p, &p);
112 early_param("coherent_pool", early_coherent_pool);
114 static gfp_t dma_atomic_pool_gfp(void)
116 if (IS_ENABLED(CONFIG_ZONE_DMA))
118 if (IS_ENABLED(CONFIG_ZONE_DMA32))
123 static int __init dma_atomic_pool_init(void)
125 unsigned int pool_size_order = get_order(atomic_pool_size);
126 unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
131 if (dev_get_cma_area(NULL))
132 page = dma_alloc_from_contiguous(NULL, nr_pages,
133 pool_size_order, false);
135 page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
139 arch_dma_prep_coherent(page, atomic_pool_size);
141 atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
145 addr = dma_common_contiguous_remap(page, atomic_pool_size,
146 pgprot_dmacoherent(PAGE_KERNEL),
147 __builtin_return_address(0));
149 goto destroy_genpool;
151 ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
152 page_to_phys(page), atomic_pool_size, -1);
155 gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
157 pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
158 atomic_pool_size / 1024);
162 dma_common_free_remap(addr, atomic_pool_size);
164 gen_pool_destroy(atomic_pool);
167 if (!dma_release_from_contiguous(NULL, page, nr_pages))
168 __free_pages(page, pool_size_order);
170 pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
171 atomic_pool_size / 1024);
174 postcore_initcall(dma_atomic_pool_init);
176 bool dma_in_atomic_pool(void *start, size_t size)
178 if (unlikely(!atomic_pool))
181 return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
184 void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
190 WARN(1, "coherent pool not initialised!\n");
194 val = gen_pool_alloc(atomic_pool, size);
196 phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
198 *ret_page = pfn_to_page(__phys_to_pfn(phys));
200 memset(ptr, 0, size);
206 bool dma_free_from_pool(void *start, size_t size)
208 if (!dma_in_atomic_pool(start, size))
210 gen_pool_free(atomic_pool, (unsigned long)start, size);
213 #endif /* CONFIG_DMA_DIRECT_REMAP */