/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
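
/*
 * Example (illustrative only, not part of this header): the attributes above
 * form a bitmask and may be OR'ed together before being passed to the *_attrs
 * variants of the allocation and mapping calls. A minimal sketch, assuming a
 * hypothetical driver with valid "dev", "size" and "handle":
 *
 *	unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN;
 *	void *vaddr = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, attrs);
 */
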
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
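
/*
 * Example (illustrative sketch, not a real kernel implementation): an IOMMU
 * or bus driver fills in an ops table and attaches it with set_dma_ops(),
 * declared further below. The callback names here are hypothetical:
 *
 *	static const struct dma_map_ops my_bus_dma_ops = {
 *		.alloc		= my_bus_alloc,
 *		.free		= my_bus_free,
 *		.map_page	= my_bus_map_page,
 *		.unmap_page	= my_bus_unmap_page,
 *	};
 *
 *	set_dma_ops(dev, &my_bus_dma_ops);
 */
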
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
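
/*
 * Example (illustrative only): DMA_BIT_MASK(n) builds a mask with the low n
 * bits set, e.g. DMA_BIT_MASK(32) == 0xffffffffULL. It is typically fed to
 * dma_set_mask_and_coherent(), defined later in this header:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		dev_warn(dev, "no suitable DMA available\n");
 */
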
#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size,
		int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

#ifdef CONFIG_DMA_OPS
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}
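
/*
 * Example (illustrative only): DMA_MAPPING_ERROR must never be tested for
 * directly; dma_mapping_error() is the only supported check, and it also
 * feeds the dma-debug infrastructure. A minimal sketch, assuming a
 * hypothetical driver with valid "dev", "buf" and "len":
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 */
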
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
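
/*
 * Example (illustrative only): dma_map_sg_attrs() returns the number of DMA
 * segments actually mapped (possibly fewer than the input nents if entries
 * were merged), and the device-visible addresses are then read back with
 * sg_dma_address()/sg_dma_len(). A sketch, assuming a hypothetical driver
 * that already built a scatterlist "sgl" with "nents" entries;
 * program_hw_descriptor() stands in for device-specific setup:
 *
 *	int i, count = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 *	struct scatterlist *sg;
 *
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_descriptor(i, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 */
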
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_attrs(dev, size, vaddr, dma_handle, DMA_ATTR_NON_CONSISTENT);
}

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
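
/*
 * Example (illustrative only): the range variants sync just a sub-range of a
 * mapping, which is useful when the device updated only part of a larger
 * buffer. A sketch, assuming a hypothetical RX descriptor "desc" that records
 * the mapping, its offset, and the number of bytes the hardware wrote:
 *
 *	dma_sync_single_range_for_cpu(dev, desc->dma, desc->offset,
 *				      desc->pkt_len, DMA_FROM_DEVICE);
 *	... the CPU may now read the packet data ...
 *	dma_sync_single_range_for_device(dev, desc->dma, desc->offset,
 *					 desc->buf_len, DMA_FROM_DEVICE);
 */
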
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success the
 * ownership for the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or -EINVAL if mapping the buffer failed.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
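
/*
 * Example (illustrative only): the typical sg_table ownership ping-pong for a
 * buffer that is reused across several device operations. The sketch assumes
 * a hypothetical driver that already allocated and populated "sgt";
 * start_device_dma() stands in for device-specific programming:
 *
 *	if (dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0))
 *		return -EIO;
 *	start_device_dma();
 *	... device fills the buffer ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	... the CPU reads the data ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
 *	... the device may DMA again ...
 *	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 */
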
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
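
/*
 * Example (illustrative only): the attr-less wrappers above cover the common
 * streaming DMA pattern. A sketch of a single TX buffer round-trip, assuming
 * a hypothetical driver with valid "dev", "skb_data" and "len":
 *
 *	dma_addr_t addr = dma_map_single(dev, skb_data, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	... tell the hardware to transmit from "addr" and wait for it ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */
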
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
		pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
		pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
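
/*
 * Example (illustrative only): coherent allocations return both a CPU virtual
 * address and a device-visible handle, and need no dma_sync_* calls. A
 * sketch, assuming a hypothetical driver allocating a descriptor ring of
 * "ring_size" bytes:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... program "ring_dma" into the device, use "ring" from the CPU ...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */
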
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
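
/*
 * Example (illustrative only): drivers commonly try a wide mask first and
 * fall back to a narrower one before giving up:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */
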
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
}

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
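
/*
 * Example (illustrative only): a driver whose hardware cannot handle DMA
 * segments larger than 64 KiB or crossing a 4 GiB boundary would advertise
 * that before mapping scatterlists (this requires dev->dma_parms to have
 * been allocated by the bus code):
 *
 *	dma_set_max_seg_size(dev, SZ_64K);
 *	dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
 */
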
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
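
/*
 * Example (illustrative only): the dmam_* variants are device-managed, so the
 * allocation is released automatically on driver detach. Typical probe()
 * usage, with no matching free on the error or remove paths; "priv" and
 * "ring_size" are hypothetical driver state:
 *
 *	priv->ring = dmam_alloc_coherent(dev, ring_size, &priv->ring_dma,
 *					 GFP_KERNEL);
 *	if (!priv->ring)
 *		return -ENOMEM;
 */
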
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
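
/*
 * Example (illustrative only): write-combined allocations suit buffers the
 * CPU mostly writes sequentially, such as a scanout framebuffer. A sketch,
 * assuming a hypothetical display driver with state in "fb":
 *
 *	fb->vaddr = dma_alloc_wc(dev, fb->size, &fb->dma, GFP_KERNEL);
 *	if (!fb->vaddr)
 *		return -ENOMEM;
 *	...
 *	dma_free_wc(dev, fb->size, fb->vaddr, fb->dma);
 */
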
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
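
/*
 * Example (illustrative only): these macros let drivers store unmap state
 * only on configurations that actually need it; elsewhere the fields and
 * accesses compile away to nothing. A hypothetical TX descriptor:
 *
 *	struct my_tx_desc {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, mapping, addr);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, mapping),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */
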
/*
 * Legacy interface to set up the DMA offset map. Drivers really should not
 * actually use it, but we have a few legacy cases left.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);

#endif /* _LINUX_DMA_MAPPING_H */