1 /* SPDX-License-Identifier: GPL-2.0-or-later */
6 * Contiguous Memory Allocator for DMA mapping framework
7 * Copyright (c) 2010-2011 by Samsung Electronics.
9 * Marek Szyprowski <m.szyprowski@samsung.com>
10 * Michal Nazarewicz <mina86@mina86.com>
14 * Contiguous Memory Allocator
16 * The Contiguous Memory Allocator (CMA) makes it possible to
17 * allocate big contiguous chunks of memory after the system has
22 * Various devices on embedded systems have no scatter-gather and/or
23 * IO map support and require contiguous blocks of memory to
24 * operate. They include devices such as cameras, hardware video
27 * Such devices often require big memory buffers (a full HD frame
28 * is, for instance, more than 2 mega pixels large, i.e. more than 6
29 * MB of memory), which makes mechanisms such as kmalloc() or
30 * alloc_page() ineffective.
32 * At the same time, a solution where a big memory region is
33 * reserved for a device is suboptimal since often more memory is
34 * reserved than strictly required and, moreover, the memory is
35 * inaccessible to the page system even if device drivers don't use it.
37 * CMA tries to solve this issue by operating on memory regions
38 * where only movable pages can be allocated from. This way, kernel
39 * can use the memory for pagecache and when device driver requests
40 * it, allocated pages can be migrated.
44 * CMA should not be used by the device drivers directly. It is
45 * only a helper framework for dma-mapping subsystem.
47 * For more information, see kernel-docs in kernel/dma/contiguous.c
52 #include <linux/device.h>
/* Global default CMA area, defined in kernel/dma/contiguous.c. */
60 extern struct cma *dma_contiguous_default_area;

/*
 * Select the CMA area to use for @dev: prefer the device's own
 * dev->cma_area when one is assigned, otherwise fall back to the
 * global dma_contiguous_default_area.
 *
 * NOTE(review): the body of the taken branch is elided in this chunk;
 * presumably it returns dev->cma_area — confirm against the full header.
 */
static inline struct cma *dev_get_cma_area(struct device *dev)
64 if (dev && dev->cma_area)
66 return dma_contiguous_default_area;
/* Reserve memory for the default contiguous DMA area at early boot. */
69 void dma_contiguous_reserve(phys_addr_t addr_limit);

/*
 * Reserve a dedicated contiguous area for a device.
 * NOTE(review): declaration is truncated in this chunk (trailing
 * parameters and the terminating ';' are elided); for the contract see
 * the kernel-doc in kernel/dma/contiguous.c.
 */
71 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
72 phys_addr_t limit, struct cma **res_cma,

/*
 * Allocate @count pages from the device's CMA area.
 * @no_warn presumably suppresses the allocation-failure warning —
 * confirm against kernel/dma/contiguous.c.
 */
75 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
76 unsigned int order, bool no_warn);

/*
 * Release pages obtained from dma_alloc_from_contiguous().
 * NOTE(review): declaration truncated here (count parameter elided).
 */
77 bool dma_release_from_contiguous(struct device *dev, struct page *pages,

/* Size-based contiguous alloc/free pair used by the dma-mapping core. */
79 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
80 void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
/*
 * !CONFIG_DMA_CMA fallback (see the "Use fallback" note below in this
 * header): no CMA area exists without CMA support.
 * NOTE(review): the stub body is elided in this chunk — presumably it
 * returns NULL; confirm against the full header.
 */
84 static inline struct cma *dev_get_cma_area(struct device *dev)
/* No-op: nothing to reserve when CMA is compiled out. */
89 static inline void dma_contiguous_reserve(phys_addr_t limit) { }
/*
 * !CONFIG_DMA_CMA stubs for the reserve/alloc/release API.
 * NOTE(review): the stub bodies (and parts of the signatures) are
 * elided in this chunk — presumably they fail or no-op; confirm
 * against the full header.
 */
91 static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
92 phys_addr_t limit, struct cma **res_cma,
/* Stub allocator: CMA pages cannot be allocated without CONFIG_DMA_CMA. */
99 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
100 unsigned int order, bool no_warn)
/* Stub release counterpart of the stub allocator above. */
106 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
112 /* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
/*
 * Fallback allocator.
 * NOTE(review): body elided in this chunk — presumably allocates plain
 * pages from the page allocator; confirm against the full header.
 */
113 static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
/* Fallback free: hand the pages straight back to the page allocator. */
119 static inline void dma_free_contiguous(struct device *dev, struct page *page,
122 __free_pages(page, get_order(size));
127 #ifdef CONFIG_DMA_PERNUMA_CMA
/* Set up per-NUMA-node CMA areas at boot (CONFIG_DMA_PERNUMA_CMA=y). */
128 void dma_pernuma_cma_reserve(void);
/* No-op stub when per-NUMA CMA is not configured.
 * NOTE(review): the intervening #else is elided in this chunk. */
130 static inline void dma_pernuma_cma_reserve(void) { }