/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 */

/*
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
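
/*
 * Illustrative sketch (not part of this header): the attributes above are
 * OR'ed together into the 'attrs' argument of the *_attrs() helpers defined
 * below. "my_dev" and "handle" are hypothetical driver variables.
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_attrs(my_dev, SZ_64K, &handle, GFP_KERNEL,
 *				    DMA_ATTR_WRITE_COMBINE |
 *				    DMA_ATTR_NO_WARN);
 */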
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			   enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
};

#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)
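
/*
 * Illustrative sketch (not part of this header): an architecture or IOMMU
 * layer fills in the dma_map_ops vtable above and attaches it with
 * set_dma_ops(); hooks it does not implement may be left NULL.
 * "my_map_page" and "my_unmap_page" are hypothetical implementations.
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.map_page	= my_map_page,
 *		.unmap_page	= my_unmap_page,
 *	};
 *
 *	set_dma_ops(dev, &my_dma_ops);
 */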
extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
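/*
 * For example, DMA_BIT_MASK(24) == 0x00ffffffULL. The n == 64 case is
 * special-cased because shifting a 64-bit value by 64 bits is undefined
 * behaviour in C.
 */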
#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for dma allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the dma api to allow compilation of dma dependent code.
 * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
 * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
 * where <something> guarantees the availability of the dma-mapping API.
 */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
#endif
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_map_single(dev, ptr, size);
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
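
/*
 * Illustrative sketch (not part of this header): a typical streaming
 * mapping maps a kernel buffer for one transfer, checks for failure, and
 * unmaps once the device is done. "my_dev", "buf" and "len" are
 * hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(my_dev, dma))
 *		return -ENOMEM;
 *	(start the transfer, wait for completion)
 *	dma_unmap_single(my_dev, dma, len, DMA_TO_DEVICE);
 */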
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
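
/*
 * Illustrative sketch (not part of this header): dma_map_sg() may coalesce
 * entries, so the device should be programmed from the returned count, not
 * the original nents; dma_unmap_sg() still takes the original nents.
 * "my_dev", "sgl", "nents" and "program_hw_entry" are hypothetical.
 *
 *	struct scatterlist *sg;
 *	int i, count = dma_map_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 *
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_entry(sg_dma_address(sg), sg_dma_len(sg));
 *	(run the transfer)
 *	dma_unmap_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 */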
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
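
/*
 * Illustrative sketch (not part of this header): if the CPU needs to look
 * at a streaming buffer while it stays mapped, ownership must be handed
 * back and forth with the sync helpers. "my_dev", "dma" and "len" are
 * hypothetical.
 *
 *	dma_sync_single_for_cpu(my_dev, dma, len, DMA_FROM_DEVICE);
 *	(the CPU may now read the buffer contents)
 *	dma_sync_single_for_device(my_dev, dma, len, DMA_FROM_DEVICE);
 */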
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size,
			   unsigned long attrs);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
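
/*
 * Illustrative sketch (not part of this header): a driver typically calls
 * dma_mmap_coherent() from its own mmap file operation to expose a
 * coherent buffer to user space. "struct my_priv" and its fields are
 * hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */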
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		       dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
				      attrs);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)	(true)
#endif
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);
	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (!arch_dma_alloc_attrs(&dev))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
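
/*
 * Illustrative sketch (not part of this header): coherent allocations
 * return a kernel virtual address plus a device-visible handle, and must
 * be freed with the matching size and handle. "my_dev" and "ring" are
 * hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(my_dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	(program ring_dma into the device, access ring from the CPU)
 *	dma_free_coherent(my_dev, SZ_4K, ring, ring_dma);
 */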
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return 1;
	return 0;
}
static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
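
/*
 * Illustrative sketch (not part of this header): a common probe-time
 * pattern is to ask for a wide mask and fall back to 32 bits, since
 * dma_set_mask_and_coherent() returns 0 on success. "my_dev" is
 * hypothetical.
 *
 *	if (dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */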
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
/*
 * Managed DMA API
 */
#ifdef CONFIG_HAS_DMA
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
#else /* !CONFIG_HAS_DMA */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t gfp)
{ return NULL; }
static inline void dmam_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_handle) { }
#endif /* !CONFIG_HAS_DMA */
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
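
/*
 * Illustrative sketch (not part of this header): the dma_unmap state
 * macros below let a driver record unmap information at zero storage cost
 * when CONFIG_NEED_DMA_MAP_STATE is not set. "struct my_buf", "buf",
 * "mapping" and "size" are hypothetical.
 *
 *	struct my_buf {
 *		void *vaddr;
 *		DEFINE_DMA_UNMAP_ADDR(dma);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, dma, mapping);
 *	dma_unmap_len_set(buf, len, size);
 *	(later, at teardown)
 *	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */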
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif /* CONFIG_NEED_DMA_MAP_STATE */

#endif /* _LINUX_DMA_MAPPING_H */