/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 */
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)
/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
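
/*
 * Illustrative sketch, not part of this header's API surface: attributes are
 * combined with a bitwise OR and passed in the 'attrs' argument of the
 * *_attrs helpers declared below.  "buf" and "dma_handle" are hypothetical
 * driver-side variables:
 *
 *	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING);
 */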
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
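/*
 * For example, DMA_BIT_MASK(32) evaluates to 0x00000000ffffffffULL while
 * DMA_BIT_MASK(64) is ~0ULL; the 64-bit special case avoids the undefined
 * behaviour of shifting a 64-bit value by 64 bits.
 */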
#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}
#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);
#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
	/* The direct mapping path is used when no dma_map_ops are set. */
	return likely(!ops);
}

/*
 * All the dma_direct_* declarations are here just for the indirect call bypass,
 * and must not be used directly by drivers!
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif
size_t dma_direct_max_mapping_size(struct device *dev);

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_is_direct(ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_is_direct(ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
	return addr;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_is_direct(ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}
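
/*
 * Illustrative only: every streaming mapping must be checked with
 * dma_mapping_error() before the address is handed to the device.
 * "buf" and "size" are hypothetical driver-side variables:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 */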
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
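
/*
 * Illustrative sketch of a streaming mapping round trip; "desc", "buf", "len"
 * and the device-specific start/completion steps are hypothetical:
 *
 *	desc->addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, desc->addr))
 *		return -ENOMEM;
 *	... let the device fill the buffer ...
 *	dma_unmap_single(dev, desc->addr, len, DMA_FROM_DEVICE);
 *	... the CPU may now safely read buf ...
 */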
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or -EINVAL if the buffer could not be mapped.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}
/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * returns, the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}
/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
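
/*
 * Illustrative sg_table usage (error handling abbreviated); the buffer
 * bounces between DMA and CPU ownership via the sync helpers above:
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	... device DMA ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_BIDIRECTIONAL);
 *	... CPU access ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);
 *	... more device DMA ...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */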
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

void *dma_alloc_from_pool(struct device *dev, size_t size,
			  struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
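
/*
 * Illustrative only: a coherent buffer stays mapped for the lifetime of the
 * object it backs, so no dma_sync_*() calls are needed.  "ring", "ring_dma"
 * and RING_SIZE are hypothetical driver-side names:
 *
 *	ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */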
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
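
/*
 * Typical probe-time usage (illustrative): prefer 64-bit addressing and fall
 * back to a 32-bit mask if the platform cannot provide it:
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */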
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
}
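
/*
 * Illustrative only; "use_bounce_buffers" is a hypothetical driver flag:
 *
 *	if (dma_addressing_limited(dev))
 *		use_bounce_buffers = true;
 */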
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
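
/*
 * Illustrative only (the hardware limits are hypothetical): a driver whose
 * device cannot have a segment cross a 4K boundary could request
 *
 *	dma_set_max_seg_size(dev, SZ_4K);
 *	dma_set_seg_boundary(dev, SZ_4K - 1);
 *
 * Both calls only succeed if the bus code has set up dev->dma_parms.
 */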
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
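
/*
 * Illustrative only: exporting a write-combined coherent buffer to user space
 * from a driver's mmap() handler.  "struct foo_buf" and its fields are
 * hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_buf *buf = file->private_data;
 *
 *		return dma_mmap_wc(buf->dev, vma, buf->vaddr, buf->handle,
 *				   vma->vm_end - vma->vm_start);
 *	}
 */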
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
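
/*
 * Illustrative only: drivers that must remember a mapping for a later unmap
 * typically embed these macros in a per-buffer structure.  "struct tx_buf",
 * "buf", "mapping" and "size" are hypothetical:
 *
 *	struct tx_buf {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, addr, mapping);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, addr),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 *
 * When CONFIG_NEED_DMA_MAP_STATE is not set, all of the above compiles away
 * to nothing.
 */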