// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
			  dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);
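
/*
 * Example (illustrative only, not part of the original file): a minimal
 * sketch of how a probe routine might use the managed allocator. The
 * "foo" driver, the device and the buffer size are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dma_addr_t ring_dma;
 *		void *ring;
 *
 *		ring = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
 *					   GFP_KERNEL);
 *		if (!ring)
 *			return -ENOMEM;
 *
 *		// No explicit dmam_free_coherent() is needed in the error or
 *		// remove paths; devres frees the buffer on driver detach.
 *		return 0;
 *	}
 */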

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
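
/*
 * Example (illustrative only): a hedged sketch of requesting a managed,
 * write-combined buffer for a hypothetical frame buffer. The names and the
 * size are assumptions, not part of this file.
 *
 *	dma_addr_t fb_dma;
 *	void *fb;
 *
 *	fb = dmam_alloc_attrs(&pdev->dev, SZ_1M, &fb_dma, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE);
 *	if (!fb)
 *		return -ENOMEM;
 */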

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (!rc)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
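
/*
 * Example (illustrative only): a minimal sketch of carving out a device-local
 * SRAM window for coherent allocations. The resource and its bus address are
 * hypothetical, and the flag choice is an assumption.
 *
 *	rc = dmam_declare_coherent_memory(&pdev->dev, res->start, res->start,
 *					  resource_size(res),
 *					  DMA_MEMORY_EXCLUSIVE);
 *	if (rc)
 *		return rc;
 *	// Subsequent dma_alloc_coherent() calls for this device are now
 *	// satisfied from the declared region.
 */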

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory().
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
				dma_addr));
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
			attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
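
/*
 * Example (illustrative only): a sketch of exporting a coherent buffer as a
 * single-entry scatterlist, e.g. from a dma-buf exporter. "buf", "dma" and
 * "size" are assumed to come from an earlier dma_alloc_coherent() call.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(dev, &sgt, buf, dma, size);
 *	if (ret)
 *		return ret;
 *	// ... hand sgt.sgl to the importer ...
 *	sg_free_table(&sgt);
 */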

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
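
/*
 * Example (illustrative only): a hedged sketch of a driver's mmap file
 * operation exposing a previously allocated coherent buffer. The "foo_dev"
 * structure and its fields are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_addr, foo->size);
 *	}
 */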

#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 dma_default_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif
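
/*
 * Example (illustrative only): a common sketch of using the required mask to
 * decide whether a 64-bit DMA mask is worth enabling on a hypothetical device
 * whose descriptors are cheaper to address with 32 bits.
 *
 *	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32) &&
 *	    !dma_set_mask(dev, DMA_BIT_MASK(64)))
 *		dev_dbg(dev, "using 64-bit DMA addressing\n");
 *	else if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */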

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)	(true)
#endif

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (!arch_dma_alloc_attrs(&dev))
		return NULL;

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
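
/*
 * Example (illustrative only): most drivers reach this path through the
 * dma_alloc_coherent() wrapper, which is simply dma_alloc_attrs() with an
 * empty attrs argument.
 *
 *	void *cpu = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
 *	// equivalent to:
 *	void *cpu = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL, 0);
 */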

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifndef HAVE_ARCH_DMA_SET_MASK
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#endif
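
/*
 * Example (illustrative only): the usual probe-time pattern sets the
 * streaming and coherent masks together; dma_set_mask_and_coherent() calls
 * dma_set_mask() and then dma_set_coherent_mask() with the same mask.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */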

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);