/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

/*
 * DMA_ATTR_OVERWRITE: This is a hint to the DMA-mapping subsystem that the
 * device is expected to overwrite the entire mapped size, thus the caller
 * does not require any of the previous buffer contents to be preserved.
 * This allows bounce-buffering implementations to optimise DMA_FROM_DEVICE
 * transfers.
 */
#define DMA_ATTR_OVERWRITE		(1UL << 10)

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
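
/*
 * For example, DMA_BIT_MASK(32) evaluates to 0xffffffff and DMA_BIT_MASK(64)
 * to ~0ULL; the explicit (n) == 64 case avoids the undefined behaviour of
 * shifting a 64-bit value by 64 bits.
 */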

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
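
/*
 * Illustrative sketch, not part of this header (hypothetical "dev", "buf"
 * and "len"): every streaming mapping must be checked with
 * dma_mapping_error() before the returned handle is used; an error value
 * must not be passed to dma_unmap_single().
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */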

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
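
/*
 * Illustrative sketch, assuming CONFIG_HAS_DMA (hypothetical "dev" and
 * "size"): the noncontiguous API declared above allocates scatter/gather
 * memory first and creates a kernel mapping only if the CPU needs one.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */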
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
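
/*
 * Illustrative sketch (hypothetical "dev", "size" and "dma"): noncoherent
 * allocations behave like streaming mappings, so the buffer must be synced
 * before the CPU reads data written by the device.
 *
 *	void *buf = dma_alloc_noncoherent(dev, size, &dma, DMA_FROM_DEVICE,
 *					  GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);
 *	... buf is now safe to read ...
 *	dma_free_noncoherent(dev, size, buf, dma, DMA_FROM_DEVICE);
 */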

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
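
/*
 * Illustrative sketch (hypothetical "hdr_len" and "payload_len"): when the
 * device only touched part of a mapping, syncing just that range is cheaper
 * than syncing the whole buffer.
 *
 *	dma_sync_single_range_for_cpu(dev, dma, hdr_len, payload_len,
 *				      DMA_FROM_DEVICE);
 *	... inspect the payload on the CPU ...
 *	dma_sync_single_range_for_device(dev, dma, hdr_len, payload_len,
 *					 DMA_FROM_DEVICE);
 */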

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
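
/*
 * Illustrative sketch (hypothetical "dev" and "sgt"): a typical sg_table
 * life cycle pairs dma_map_sgtable() with the three helpers above.
 *
 *	int ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *
 *	if (ret)
 *		return ret;
 *	... run DMA ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_TO_DEVICE);
 *	... the CPU updates the buffer ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);
 *	... run DMA again, then tear down: ...
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */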

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
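
/*
 * Illustrative sketch (hypothetical "ring" structure): coherent memory
 * needs no dma_sync_*() calls, at the cost of a potentially uncached
 * kernel mapping.
 *
 *	ring->desc = dma_alloc_coherent(dev, ring_size, &ring->dma,
 *					GFP_KERNEL);
 *	if (!ring->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring_size, ring->desc, ring->dma);
 */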

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
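
/*
 * Illustrative sketch (typical probe-time use, hypothetical "pdev"): try
 * the widest mask the hardware supports and fall back to 32 bits.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */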

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
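
/*
 * For example, with a segment boundary mask of 0xffffffff and a 4K IOMMU
 * page size (@page_shift == 12) this evaluates to (0xffffffff >> 12) + 1 ==
 * 0x100000 pages, i.e. a boundary every 4GB.
 */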

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline int dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return -EIO;
	dev->dma_parms->min_align_mask = min_align_mask;
	return 0;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
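
/*
 * Illustrative sketch (hypothetical "priv" in a probe function): the
 * managed variant is released automatically on driver detach, so no
 * explicit free is needed in the error or remove paths.
 *
 *	priv->desc = dmam_alloc_coherent(&pdev->dev, size, &priv->dma,
 *					 GFP_KERNEL);
 *	if (!priv->desc)
 *		return -ENOMEM;
 */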

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif /* CONFIG_NEED_DMA_MAP_STATE */
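
/*
 * Illustrative sketch (hypothetical "struct foo_tx_buffer"): drivers embed
 * these macros so the unmap bookkeeping only occupies memory when
 * CONFIG_NEED_DMA_MAP_STATE is set.
 *
 *	struct foo_tx_buffer {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, addr, dma);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, addr),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */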

#endif /* _LINUX_DMA_MAPPING_H */