// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);

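/*
 * Usage sketch (illustrative; "my_probe" is a hypothetical driver probe
 * routine): memory obtained here needs no explicit free in error paths
 * or in the remove routine, since devres releases it on driver detach.
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		dma_addr_t ring_dma;
 *		void *ring;
 *
 *		ring = dmam_alloc_attrs(dev, SZ_4K, &ring_dma, GFP_KERNEL, 0);
 *		if (!ring)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */
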
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (!rc)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);

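/*
 * Usage sketch (hypothetical probe path, with "res" a platform resource
 * describing device-local SRAM): declare the window as the device's
 * coherent pool; devres undoes the declaration on driver detach.
 *
 *	rc = dmam_declare_coherent_memory(dev, res->start, res->start,
 *					  resource_size(res),
 *					  DMA_MEMORY_EXCLUSIVE);
 *	if (rc)
 *		return rc;
 */
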
/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory()
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
				dma_addr));
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
			attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

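/*
 * Usage sketch (assumes cpu_addr/dma_addr/size came from a prior
 * dma_alloc_attrs() call): dma-buf exporters, for example, use this to
 * describe a coherent allocation as a single-entry scatterlist.
 *
 *	struct sg_table sgt;
 *	int rc;
 *
 *	rc = dma_get_sgtable_attrs(dev, &sgt, cpu_addr, dma_addr, size, 0);
 *	if (rc)
 *		return rc;
 *	... hand sgt.sgl to the consumer, then sg_free_table(&sgt); ...
 */
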
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);

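/*
 * Usage sketch (hypothetical character device, "ctx" holding a buffer
 * from dma_alloc_attrs()): a driver's mmap file operation can forward
 * straight to this helper; dma_mmap_coherent() is the attrs-less wrapper.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		return dma_mmap_attrs(ctx->dev, vma, ctx->cpu_addr,
 *				      ctx->dma_addr, ctx->size, 0);
 *	}
 */
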
#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 dma_default_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}

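/*
 * Worked example (assuming 4 KiB pages): with 4 GiB of RAM, max_pfn is
 * 0x100000, so high_totalram is zero and low_totalram rounds up to
 * 0xffffffff, i.e. a 32-bit mask.  With 8 GiB, high_totalram becomes 1
 * and the result is 0x1ffffffff, i.e. a 33-bit mask.
 */
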
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)	(true)
#endif

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (!arch_dma_alloc_attrs(&dev))
		return NULL;

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

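/*
 * Usage sketch (hypothetical driver context): the canonical
 * allocate/use/free pairing.  Note that the same attrs passed at
 * allocation time must be passed back to dma_free_attrs();
 * dma_alloc_coherent()/dma_free_coherent() are the attrs-less wrappers.
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, size, buf, dma, DMA_ATTR_WRITE_COMBINE);
 */
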
static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifndef HAVE_ARCH_DMA_SET_MASK
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#endif

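/*
 * Usage sketch (typical probe-time pattern): try a wide mask first, then
 * fall back to 32 bits; dma_set_mask_and_coherent() combines this with
 * dma_set_coherent_mask() below.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
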
#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);

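/*
 * Usage sketch (assumes "buf" was allocated with DMA_ATTR_NON_CONSISTENT,
 * so the CPU's view may be cached): after the CPU writes, sync the buffer
 * before the device reads it.
 *
 *	memcpy(buf, data, len);
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 */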