// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>
/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;
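
/*
 * Illustrative sketch, not part of this file: an architecture whose DMA zone
 * covers, say, the first 1 GiB instead of 16 MiB could override the default
 * from its early init code roughly as below ("my_arch_dma_init" is a
 * hypothetical name).
 */
#if 0
void __init my_arch_dma_init(void)
{
	/* ZONE_DMA spans the low 30 bits of the physical address space */
	zone_dma_bits = 30;
}
#endif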

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
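/*
 * Worked example (illustrative only): with 5 GiB of RAM and an identity
 * phys-to-DMA translation the highest addressable byte sits just below
 * 0x140000000.  fls64() of that address is 33, so the expression above
 * evaluates to (1ULL << 32) * 2 - 1 == 0x1ffffffff, i.e. a 33-bit mask
 * that covers all of memory.
 */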

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
					 u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	if (force_dma_unencrypted(dev))
		*phys_limit = __dma_to_phys(dev, dma_limit);
	else
		*phys_limit = dma_to_phys(dev, dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first. If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

/*
 * Decrypting memory is allowed to block, so if this device requires
 * unencrypted memory it must come from atomic pools.
 */
static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
					      unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
		return false;
	if (gfpflags_allow_blocking(gfp))
		return false;
	if (force_dma_unencrypted(dev))
		return true;
	if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
		return false;
	if (dma_alloc_need_uncached(dev, attrs))
		return true;
	return false;
}

static inline bool dma_should_free_from_pool(struct device *dev,
					     unsigned long attrs)
{
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
		return true;
	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev))
		return false;
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
		return true;
	return false;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, unsigned long attrs)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;
	int err;

	size = PAGE_ALIGN(size);

	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
		u64 phys_mask;

		gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
				&phys_mask);
		page = dma_alloc_from_pool(dev, size, &ret, gfp,
				dma_coherent_ok);
		if (!page)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
	if (!page)
		return NULL;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		/* return the page pointer as the opaque cookie */
		ret = page;
		goto done;
	}

	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	     dma_alloc_need_uncached(dev, attrs)) ||
	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size,
				dma_pgprot(dev, PAGE_KERNEL, attrs),
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
		if (force_dma_unencrypted(dev)) {
			err = set_memory_decrypted((unsigned long)ret,
						   1 << get_order(size));
			if (err)
				goto out_free_pages;
		}
		memset(ret, 0, size);
		goto done;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		err = set_memory_decrypted((unsigned long)ret,
					   1 << get_order(size));
		if (err)
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}
done:
	if (force_dma_unencrypted(dev))
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	else
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (force_dma_unencrypted(dev)) {
		err = set_memory_encrypted((unsigned long)page_address(page),
					   1 << get_order(size));
		/* If memory cannot be re-encrypted, it must be leaked */
		if (err)
			return NULL;
	}
out_free_pages:
	dma_free_contiguous(dev, page, size);
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (dma_should_free_from_pool(dev, attrs) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);
	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
		arch_dma_clear_uncached(cpu_addr, size);

	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
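
/*
 * Illustrative sketch, not part of this file: drivers never call
 * dma_direct_alloc()/dma_direct_free() directly.  On a device without an
 * IOMMU the generic DMA API ends up here, so a typical consumer looks
 * roughly like the hypothetical helpers below (a real driver would also
 * include <linux/dma-mapping.h>).
 */
#if 0
static int my_driver_alloc_ring(struct device *dev, size_t ring_size,
				void **ring, dma_addr_t *ring_dma)
{
	/* Routed to dma_direct_alloc() when dev uses the direct mapping */
	*ring = dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
	if (!*ring)
		return -ENOMEM;
	return 0;
}

static void my_driver_free_ring(struct device *dev, size_t ring_size,
				void *ring, dma_addr_t ring_dma)
{
	/* Routed to dma_direct_free() for direct-mapped devices */
	dma_free_coherent(dev, ring_size, ring, ring_dma);
}
#endif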

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
				attrs);
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
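
/*
 * Illustrative sketch, not part of this file: a driver mapping a
 * scatterlist through the generic DMA API reaches dma_direct_map_sg() on
 * direct-mapped devices.  A zero return means the mapping failed and
 * nothing is left mapped.  Names below are hypothetical.
 */
#if 0
static int my_driver_map_sgtable(struct device *dev, struct sg_table *sgt)
{
	int nents;

	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
	if (!nents)
		return -EIO;
	sgt->nents = nents;
	return 0;
}
#endif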

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}
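
/*
 * Illustrative sketch, not part of this file: a driver exposing a coherent
 * buffer to userspace goes through dma_mmap_coherent(), which lands in
 * dma_direct_mmap() for direct-mapped devices.  "struct my_driver_data" and
 * its fields are hypothetical.
 */
#if 0
static int my_driver_mmap(struct my_driver_data *priv,
			  struct vm_area_struct *vma)
{
	return dma_mmap_coherent(priv->dev, vma, priv->buf,
				 priv->buf_dma, priv->buf_size);
}
#endif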

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so
	 * use __phys_to_dma() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= __phys_to_dma(dev, min_mask);
}
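
/*
 * Illustrative sketch, not part of this file: dma_direct_supported() is what
 * ultimately answers a driver's dma_set_mask_and_coherent() request on a
 * direct-mapped bus.  A typical probe path looks roughly like this
 * ("my_driver_probe_dma" is a hypothetical name):
 */
#if 0
static int my_driver_probe_dma(struct device *dev)
{
	/* Prefer 64-bit addressing, fall back to 32-bit if unsupported */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -ENODEV;
	return 0;
}
#endif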

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
}
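
/*
 * Illustrative sketch, not part of this file: callers such as page_pool use
 * dma_need_sync() (which resolves to dma_direct_need_sync() here) to skip
 * per-buffer cache maintenance when the device is coherent and no SWIOTLB
 * bouncing is involved.  Names below are hypothetical.
 */
#if 0
static void my_driver_complete_rx(struct device *dev, dma_addr_t buf_dma,
				  size_t len)
{
	if (dma_need_sync(dev, buf_dma))
		dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
}
#endif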