/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 *
 * Contributed by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version.
 */
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	unsigned int order_per_bit; /* Order of pages represented by one bit */
	struct mutex	lock;
};

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);
phys_addr_t cma_get_base(struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}
static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
static unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}
static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
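
/*
 * Worked example (illustrative; not part of the upstream file): with
 * order_per_bit == 2, one bitmap bit covers 2^2 == 4 pages. A request
 * for 5 pages therefore rounds up to 8 pages, i.e. 2 bits:
 *
 *	cma_bitmap_pages_to_bits(cma, 5)
 *		== ALIGN(5, 4) >> 2 == 8 >> 2 == 2
 *
 * Likewise cma_bitmap_aligned_mask(cma, 4) == (1UL << (4 - 2)) - 1 == 3,
 * so bitmap_find_next_zero_area() only returns bit offsets that are a
 * multiple of 4 bits, which is exactly the requested order-4 (16-page)
 * alignment.
 */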
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	return -EINVAL;
}
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional; use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area; must be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve a contiguous area at exactly @base. If false,
 * reserve in the range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start = __pa(high_memory);
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
		__func__, (unsigned long)size, (unsigned long)base,
		(unsigned long)limit, (unsigned long)alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;
	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm; if that happened, a contiguous allocation spanning the
	 * boundary would fail, which is not what we want. So force at least
	 * pageblock/MAX_ORDER alignment at both ends.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);
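
	/*
	 * Worked example (illustrative; the exact numbers depend on the
	 * architecture's configuration): with 4 KiB pages and
	 * MAX_ORDER - 1 == 10, the enforced minimum alignment is
	 * PAGE_SIZE << 10 == 4 MiB. A request for SZ_1M at base 0x1f00000
	 * is thus widened to a 4 MiB area at base 0x2000000.
	 */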
	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;
	/*
	 * Adjust limit to avoid crossing the low/high memory boundary for
	 * automatically allocated regions.
	 */
	if (((limit == 0 || limit > memblock_end) &&
	     (memblock_end - size < highmem_start &&
	      memblock_end > highmem_start)) ||
	    (!fixed && limit > highmem_start && limit - size < highmem_start)) {
		limit = highmem_start;
	}
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
			(unsigned long)base, (unsigned long)highmem_start);
		goto err;
	}
	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		}
		base = addr;
	}
	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;

	pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
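
/*
 * Usage sketch (illustrative only; the function and variable names below
 * are hypothetical, not part of this file): arch setup code could reserve
 * a 16 MiB CMA area anywhere in memory during early boot:
 *
 *	static struct cma *demo_cma;
 *
 *	void __init demo_reserve_cma(void)
 *	{
 *		if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
 *					   &demo_cma))
 *			pr_warn("demo: CMA reservation failed\n");
 *	}
 *
 * The area only becomes allocatable after cma_init_reserved_areas() has
 * run at core_initcall time.
 */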
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a range of pages from the given contiguous
 * memory area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	unsigned long mask, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);
	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	return true;
}
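
/*
 * Usage sketch (illustrative only; the variable names are hypothetical):
 * a driver holding a struct cma pointer obtained from
 * cma_declare_contiguous() might allocate and free a 64-page buffer as:
 *
 *	struct page *buf;
 *
 *	buf = cma_alloc(demo_cma, 64, 0);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	cma_release(demo_cma, buf, 64);
 *
 * cma_alloc() may sleep while migrating pages out of the chosen range,
 * so it must not be called from atomic context.
 */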