/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation.  Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>
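
/*
 * Smallest number of address bits that all ZONE_DMA allocations are
 * expected to satisfy; the generic default is 24 bits (the ISA DMA limit),
 * and architectures may raise it at early boot.
 */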
extern unsigned int zone_dma_bits;

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t dev_addr = (dma_addr_t)paddr;

	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	phys_addr_t paddr = (phys_addr_t)dev_addr;

	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
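
/*
 * Worked example with made-up values (PAGE_SHIFT == 12 and a hypothetical
 * dev->dma_pfn_offset of 0x80000): CPU physical address 0x80001000 maps to
 * bus address 0x1000 and back again:
 *
 *	__phys_to_dma(dev, 0x80001000) == 0x80001000 - (0x80000 << 12)
 *				       == 0x1000
 *	__dma_to_phys(dev, 0x1000)     == 0x1000 + (0x80000 << 12)
 *				       == 0x80001000
 *
 * The two helpers are exact inverses as long as neither result wraps.
 */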

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
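
/*
 * The arch override is for memory-encryption aware platforms; on x86, for
 * instance, force_dma_unencrypted() returns true under AMD SEV so that
 * dma-direct hands out unencrypted (shared) buffers the device can reach,
 * while the stub above keeps the common code free of #ifdefs.
 */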

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it.  The raw __phys_to_dma
 * and __dma_to_phys versions should only be used on non-encrypted memory for
 * special occasions like DMA coherent buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(__phys_to_dma(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return __sme_clr(__dma_to_phys(dev, daddr));
}
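
/*
 * Illustration, assuming AMD SME semantics where __sme_set(x) is
 * (x | sme_me_mask) and __sme_clr(x) is (x & ~sme_me_mask):
 *
 *	phys_to_dma(dev, paddr) == __phys_to_dma(dev, paddr) | sme_me_mask
 *	dma_to_phys(dev, daddr) == __dma_to_phys(dev, daddr) & ~sme_me_mask
 *
 * Without memory encryption both macros are no-ops and these helpers
 * degenerate to the raw address arithmetic.
 */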

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (!dev->dma_mask)
		return false;

	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
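
/*
 * Example check with a hypothetical 30-bit device (*dev->dma_mask ==
 * DMA_BIT_MASK(30), bus_dma_limit == 0): a 4 KiB mapping at bus address
 * 0x3ffff000 is accepted (end == 0x3fffffff), while the same mapping at
 * 0x40000000 is rejected because it exceeds the mask.
 */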

u64 dma_direct_get_required_mask(struct device *dev);
gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
				  u64 *phys_mask);
bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
size_t dma_direct_max_mapping_size(struct device *dev);
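
/*
 * Usage sketch (hypothetical caller; in the kernel these entry points are
 * reached via kernel/dma/mapping.c when the device uses the direct mapping):
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_direct_alloc(dev, size, &handle, GFP_KERNEL, 0);
 *
 *	if (cpu) {
 *		... device DMAs via "handle", the CPU accesses "cpu" ...
 *		dma_direct_free(dev, size, cpu, handle, 0);
 *	}
 */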

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
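
/*
 * Note the mirrored ordering in the two helpers above: for device access
 * the swiotlb bounce buffer is filled first and CPU caches are written
 * back afterwards, while for CPU access the caches are invalidated before
 * swiotlb copies the device's data back out of the bounce buffer.
 */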

static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_map(dev, phys, size, dir, attrs);

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (swiotlb_force != SWIOTLB_NO_FORCE)
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}
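
/*
 * Decision flow above, in short: SWIOTLB_FORCE bounces every mapping;
 * otherwise the direct mapping is used whenever dma_capable() accepts the
 * address, bouncing is the fallback for unreachable memory, and with
 * SWIOTLB_NO_FORCE an unreachable address is a hard DMA_MAPPING_ERROR.
 */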

static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
#endif /* _LINUX_DMA_DIRECT_H */