// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device. It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple. The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages. Each page in the page_list is split into blocks of at
 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
 * list of free blocks across all pages. Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
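
/*
 * Illustrative usage sketch: a driver typically creates one pool per
 * fixed-size hardware structure, allocates blocks from it at runtime, and
 * frees every block before destroying the pool. The device pointer "dev"
 * and the 64-byte descriptor size below are hypothetical.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *desc;
 *
 *	pool = dma_pool_create("foo-desc", dev, 64, 64, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!desc)
 *		return -ENOMEM;
 *
 *	(program the hardware with the dma handle, wait for completion)
 *
 *	dma_pool_free(pool, desc, dma);
 *	dma_pool_destroy(pool);
 */
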
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_block {
	struct dma_block *next_block;
	dma_addr_t dma;
};

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct dma_block *next_block;
	size_t nr_blocks;
	size_t nr_active;
	size_t nr_pages;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_pool *pool;
	unsigned size;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
				      pool->name, pool->nr_active,
				      pool->nr_blocks, pool->size,
				      pool->nr_pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);

#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
	u8 *data = (void *)block;
	int i;

	for (i = sizeof(struct dma_block); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
			pool->name, block);

		/*
		 * Dump the first 4 bytes even if they are not
		 * POOL_POISON_FREED
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				data, pool->size, 1);
		break;
	}

	if (!want_init_on_alloc(mem_flags))
		memset(block, POOL_POISON_ALLOCATED, pool->size);
}

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = pool->next_block;
	struct dma_page *page;

	page = pool_find_page(pool, dma);
	if (!page) {
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (block) {
		if (block != vaddr) {
			block = block->next_block;
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}

	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

static struct dma_block *pool_block_pop(struct dma_pool *pool)
{
	struct dma_block *block = pool->next_block;

	if (block) {
		pool->next_block = block->next_block;
		pool->nr_active++;
	}
	return block;
}

static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
			    dma_addr_t dma)
{
	block->dma = dma;
	block->next_block = pool->next_block;
	pool->next_block = block;
}

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory. Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives. The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary. This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	if (size < sizeof(struct dma_block))
		size = sizeof(struct dma_block);

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kzalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

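/*
 * Illustrative sketch of the @boundary argument (hypothetical values, not
 * used anywhere in this file): a device whose 32-byte descriptors must not
 * cross a 4 KiB boundary could create its pool as
 *
 *	pool = dma_pool_create("foo-desc", dev, 32, 32, 4096);
 *
 * Every block returned by dma_pool_alloc() then lies entirely within one
 * 4 KiB region, because pool_initialise_page() skips ahead to the next
 * boundary rather than handing out a block that would straddle it.
 */
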
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int next_boundary = pool->boundary, offset = 0;
	struct dma_block *block, *first = NULL, *last = NULL;

	pool_init_page(pool, page);
	while (offset + pool->size <= pool->allocation) {
		if (offset + pool->size > next_boundary) {
			offset = next_boundary;
			next_boundary += pool->boundary;
			continue;
		}

		block = page->vaddr + offset;
		block->dma = page->dma + offset;
		block->next_block = NULL;

		if (last)
			last->next_block = block;
		else
			first = block;
		last = block;

		offset += pool->size;
		pool->nr_blocks++;
	}

	last->next_block = pool->next_block;
	pool->next_block = first;

	list_add(&page->page_list, &pool->page_list);
	pool->nr_pages++;
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	return page;
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false, busy = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	if (pool->nr_active) {
		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
		busy = true;
	}

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (!busy)
			dma_free_coherent(pool->dev, pool->allocation,
					  page->vaddr, page->dma);
		list_del(&page->page_list);
		kfree(page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	struct dma_block *block;
	struct dma_page *page;
	unsigned long flags;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	block = pool_block_pop(pool);
	if (!block) {
		/*
		 * pool_alloc_page() might sleep, so temporarily drop
		 * &pool->lock
		 */
		spin_unlock_irqrestore(&pool->lock, flags);

		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
		if (!page)
			return NULL;

		spin_lock_irqsave(&pool->lock, flags);
		pool_initialise_page(pool, page);
		block = pool_block_pop(pool);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	*handle = block->dma;
	pool_check_block(pool, block, mem_flags);
	if (want_init_on_alloc(mem_flags))
		memset(block, 0, pool->size);

	return block;
}
EXPORT_SYMBOL(dma_pool_alloc);

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = vaddr;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool_block_err(pool, vaddr, dma)) {
		pool_block_push(pool, block, dma);
		pool->nr_active--;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

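/*
 * Illustrative alloc/free pairing for the two calls above ("pool" and the
 * command buffer are hypothetical): dma_pool_alloc() may be called with
 * GFP_ATOMIC from contexts that cannot sleep, and every block must later be
 * handed back with the same pool, virtual address and dma handle.
 *
 *	dma_addr_t dma;
 *	void *cmd;
 *
 *	cmd = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *	if (!cmd)
 *		return -ENOMEM;
 *
 *	(point the hardware at the dma handle and wait for it to finish)
 *
 *	dma_pool_free(pool, cmd, dma);
 */
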
/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create(). A DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

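/*
 * Illustrative sketch of the managed variant above (the foo driver, its pool
 * name and the 64-byte block size are hypothetical): a pool created in
 * probe() with dmam_pool_create() needs no matching destroy call, since
 * devres tears it down automatically on driver detach.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo-desc", &pdev->dev, 64, 64, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *		return 0;
 *	}
 */
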
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);