// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 */
#define pr_fmt(fmt) "cma: " fmt
#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif
struct cma *dma_contiguous_default_area;
/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes __initconst =
	(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;
static int __init early_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;

	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}

	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
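/*
 * Illustrative examples of the parsing above (not part of the original
 * file): "cma=64M" reserves 64 MiB anywhere, "cma=64M@0x80000000" also
 * fixes the base address, and "cma=64M@0x80000000-0x84000000" caps the
 * upper limit; when base + size == limit, the placement becomes fixed
 * (see dma_contiguous_reserve() below).
 */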
#ifdef CONFIG_DMA_PERNUMA_CMA

static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
static phys_addr_t pernuma_size_bytes __initdata;

static int __init early_cma_pernuma(char *p)
{
	pernuma_size_bytes = memparse(p, &p);
	return 0;
}
early_param("cma_pernuma", early_cma_pernuma);
#endif
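/*
 * Illustrative example (not part of the original file): booting with
 * "cma_pernuma=16M" makes dma_pernuma_cma_reserve() below create a
 * 16 MiB CMA area on every online NUMA node.
 */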
#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
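/*
 * Worked example (illustrative): with 1 GiB of memory, 4 KiB pages and
 * CONFIG_CMA_SIZE_PERCENTAGE=10, total_pages is 262144, so the result is
 * 26214 pages << PAGE_SHIFT, i.e. roughly 102 MiB reserved for CMA.
 */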
#ifdef CONFIG_DMA_PERNUMA_CMA
void __init dma_pernuma_cma_reserve(void)
{
	int nid;

	if (!pernuma_size_bytes)
		return;

	for_each_online_node(nid) {
		int ret;
		char name[CMA_MAX_NAME];
		struct cma **cma = &dma_contiguous_pernuma_area[nid];

		snprintf(name, sizeof(name), "pernuma%d", nid);
		ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
						 0, false, name, cma, nid);
		if (ret) {
			pr_warn("%s: reservation failed: err %d, node %d\n",
				__func__, ret, nid);
			continue;
		}

		pr_debug("%s: reserved %llu MiB on node %d\n", __func__,
			 (unsigned long long)pernuma_size_bytes / SZ_1M, nid);
	}
}
#endif
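/*
 * Usage note (assumption, not from this file): architecture setup code is
 * expected to call dma_pernuma_cma_reserve() early during boot, once
 * memblock is up but before the buddy allocator takes over, e.g. from
 * arm64's bootmem_init().
 */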
/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
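/*
 * Illustrative call site (assumption, not from this file): 32-bit ARM
 * invokes this from arm_memblock_init(), roughly as
 *
 *	dma_contiguous_reserve(arm_dma_limit);
 *
 * so that the global area lands below the platform's DMA addressing limit.
 */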
/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creating custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
				     "reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}
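/*
 * Illustrative sketch (hypothetical, not from this file): early board code
 * could carve out a dedicated 16 MiB area below 4 GiB for one device:
 *
 *	static struct cma *cam_cma;	// hypothetical area pointer
 *
 *	dma_contiguous_reserve_area(SZ_16M, 0, SZ_4G, &cam_cma, false);
 *
 * and later attach it to the device via dev_set_cma_area().
 */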
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, bool no_warn)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}
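/*
 * Illustrative example (not from this file): with 4 KiB pages, a caller
 * passing count = 16, align = 4 asks for 64 KiB aligned to 2^4 pages;
 * any @align above CONFIG_CMA_ALIGNMENT (default 8, i.e. 1 MiB) is
 * clamped by the check above.
 */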
/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}
static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
{
	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);

	return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
}
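/*
 * Worked example (illustrative): for size = 64 KiB with 4 KiB pages,
 * get_order() yields 4, so the 16-page allocation is naturally aligned
 * to its own size; larger requests are capped at CONFIG_CMA_ALIGNMENT
 * to avoid demanding excessive alignment.
 */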
/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev:  Pointer to device for which the allocation is performed.
 * @size: Requested allocation size.
 * @gfp:  Allocation flags.
 *
 * This function tries the device-specific contiguous memory area first;
 * failing that, it tries the per-NUMA CMA area, and if that allocation
 * fails as well, it falls back to the default global area.
 *
 * Note that single-page allocations bypass the per-NUMA and global areas:
 * the addresses within one page are always contiguous, so there is no need
 * to waste CMA pages on them; this also helps reduce fragmentation.
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
#ifdef CONFIG_DMA_PERNUMA_CMA
	int nid = dev_to_node(dev);
#endif

	/* CMA can be used only in the context which permits sleeping */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;
	if (dev->cma_area)
		return cma_alloc_aligned(dev->cma_area, size, gfp);
	if (size <= PAGE_SIZE)
		return NULL;

#ifdef CONFIG_DMA_PERNUMA_CMA
	if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
		struct cma *cma = dma_contiguous_pernuma_area[nid];
		struct page *page;

		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}
	}
#endif
	if (!dma_contiguous_default_area)
		return NULL;

	return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);
}
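/*
 * Usage note (assumption, not from this file): this is the allocation path
 * used by the DMA direct mapping code, e.g. __dma_direct_alloc_pages()
 * calls dma_alloc_contiguous() before falling back to the page allocator.
 */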
/**
 * dma_free_contiguous() - release allocated pages
 * @dev:  Pointer to device for which the pages were allocated.
 * @page: Pointer to the allocated pages.
 * @size: Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous(). Since
 * cma_release() returns false when the provided pages do not belong to the
 * contiguous area and true otherwise, this function falls back to
 * __free_pages() upon a false return.
 */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* if dev has its own cma, free page from there */
	if (dev->cma_area) {
		if (cma_release(dev->cma_area, page, count))
			return;
	} else {
		/*
		 * otherwise, page is from either per-numa cma or default cma
		 */
#ifdef CONFIG_DMA_PERNUMA_CMA
		if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
				page, count))
			return;
#endif
		if (cma_release(dma_contiguous_default_area, page, count))
			return;
	}

	/* not in any cma, free from buddy */
	__free_pages(page, get_order(size));
}
/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt
static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev_set_cma_area(dev, rmem->priv);
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev_set_cma_area(dev, NULL);
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};
static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	phys_addr_t mask = align - 1;
	unsigned long node = rmem->fdt_node;
	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
	struct cma *cma;
	int err;

	if (size_cmdline != -1 && default_cma) {
		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
			rmem->name);
		return -EBUSY;
	}

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if ((rmem->base & mask) || (rmem->size & mask)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (default_cma)
		dma_contiguous_set_default(cma);

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
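/*
 * Illustrative device-tree snippet (not part of this file) matched by the
 * setup hook above; "reusable" is required, and "linux,cma-default" makes
 * the pool the default CMA area:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 */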
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
#endif