/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
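
/*
 * Illustrative sketch (not part of this header): the attributes above form a
 * bitmask and may be combined when passed to the *_attrs variants of the
 * mapping and allocation functions declared below.  "dev", "buf" and "len"
 * are hypothetical placeholders:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
 *				      DMA_ATTR_WEAK_ORDERING | DMA_ATTR_NO_WARN);
 */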

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
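
/*
 * Worked example (illustrative): DMA_BIT_MASK(32) evaluates to 0xffffffff and
 * DMA_BIT_MASK(64) to ~0ULL; the (n) == 64 special case avoids shifting a
 * 64-bit value by 64 bits, which is undefined behaviour in C.  A typical use
 * is dma_set_mask(dev, DMA_BIT_MASK(32)) for a device limited to 32-bit
 * addressing.
 */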

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}
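
/*
 * Sketch of a streaming-mapping round trip (illustrative only; "dev", "buf"
 * and "len" are hypothetical, error handling abbreviated):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */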

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
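
/*
 * Sketch (illustrative): when the device has written only part of a mapped
 * buffer, just that range needs handing back to the CPU before it is read,
 * e.g. for a hypothetical 512-byte status block at offset 0 of "handle":
 *
 *	dma_sync_single_range_for_cpu(dev, handle, 0, 512, DMA_FROM_DEVICE);
 *	... the CPU reads the status block ...
 *	dma_sync_single_range_for_device(dev, handle, 0, 512, DMA_FROM_DEVICE);
 */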

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success the
 * ownership for the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before the buffer is
 * touched by the CPU.
 *
 * Returns 0 on success or -EINVAL if the buffer could not be mapped.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * returns, the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
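
/*
 * Sketch of the sg_table ownership protocol documented above (illustrative;
 * "dev" and "sgt" are hypothetical, error handling abbreviated):
 *
 *	if (dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0))
 *		return -EINVAL;
 *	... the device works on the buffer ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_BIDIRECTIONAL);
 *	... the CPU touches the buffer ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);
 *	... more device work ...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */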

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);
struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
		pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
		pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
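
/*
 * Sketch of a coherent allocation (illustrative; "dev", "ring" and "ring_dma"
 * are hypothetical). The returned CPU address and the DMA address refer to
 * the same buffer, so no explicit sync calls are needed:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */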

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same as, or smaller than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
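
/*
 * Sketch of the usual probe-time pattern (illustrative; "dev" is
 * hypothetical): try 64-bit addressing first and fall back to 32-bit:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */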

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case exists just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif /* CONFIG_NEED_DMA_MAP_STATE */
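
/*
 * Sketch of how the unmap-state macros are used (illustrative; "struct
 * ring_info" and its fields are hypothetical). The fields compile away
 * entirely when CONFIG_NEED_DMA_MAP_STATE is not set:
 *
 *	struct ring_info {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(ri, mapping, handle);
 *	dma_unmap_len_set(ri, len, len);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(ri, mapping),
 *			 dma_unmap_len(ri, len), DMA_FROM_DEVICE);
 */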

/*
 * Legacy interface to set up the DMA offset map. Drivers really should not
 * actually use it, but we have a few legacy cases left.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);

extern const struct dma_map_ops dma_virt_ops;

#endif /* _LINUX_DMA_MAPPING_H */