// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */
#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
struct iommu_dma_msi_page {
        struct list_head        list;
        dma_addr_t              iova;
        phys_addr_t             phys;
};
enum iommu_dma_cookie_type {
        IOMMU_DMA_IOVA_COOKIE,
        IOMMU_DMA_MSI_COOKIE,
};
struct iommu_dma_cookie {
        enum iommu_dma_cookie_type      type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct iova_domain      iovad;
                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
                dma_addr_t              msi_iova;
        };
        struct list_head                msi_page_list;

        /* Domain for flush queue callback; NULL if flush queue not in use */
        struct iommu_domain             *fq_domain;
};
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return cookie->iovad.granule;
        return PAGE_SIZE;
}
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie *cookie;

        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
                INIT_LIST_HEAD(&cookie->msi_page_list);
                cookie->type = type;
        }
        return cookie;
}
/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
        if (domain->iova_cookie)
                return -EEXIST;

        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
        if (!domain->iova_cookie)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
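/*
 * Example (an illustrative sketch, not part of the original file): an IOMMU
 * driver's domain_alloc() callback would typically pair this with a matching
 * iommu_put_dma_cookie() call in its domain_free() callback. The my_*()
 * names and struct my_domain are hypothetical.
 *
 *      static struct iommu_domain *my_domain_alloc(unsigned int type)
 *      {
 *              struct my_domain *my;
 *
 *              if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
 *                      return NULL;
 *
 *              my = kzalloc(sizeof(*my), GFP_KERNEL);
 *              if (!my)
 *                      return NULL;
 *
 *              if (type == IOMMU_DOMAIN_DMA &&
 *                  iommu_get_dma_cookie(&my->domain)) {
 *                      kfree(my);
 *                      return NULL;
 *              }
 *              return &my->domain;
 *      }
 */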
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
        struct iommu_dma_cookie *cookie;

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        if (domain->iova_cookie)
                return -EEXIST;

        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;

        cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
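/*
 * Example (an illustrative sketch, not part of the original file): a caller
 * such as VFIO that runs its own IOVA allocator on an unmanaged domain can
 * set aside a doorbell window before attaching devices. The msi_base value
 * and the error label are hypothetical.
 *
 *      domain = iommu_domain_alloc(bus);
 *      if (!domain)
 *              return -ENOMEM;
 *      ret = iommu_get_msi_cookie(domain, msi_base);
 *      if (ret)
 *              goto out_free_domain;
 *      // ...exclude the window at msi_base from the caller's own IOVA
 *      // allocator, then attach the group/device as usual...
 */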
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi, *tmp;

        if (!cookie)
                return;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
                put_iova_domain(&cookie->iovad);

        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
                list_del(&msi->list);
                kfree(msi);
        }
        kfree(cookie);
        domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
        if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
                iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
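/*
 * Example (an illustrative sketch, not part of the original file): an IOMMU
 * driver building its .get_resv_regions callback on top of this helper while
 * also publishing a driver-specific software-managed MSI window. The
 * my_get_resv_regions() name and MY_MSI_IOVA_BASE/MY_MSI_IOVA_LENGTH are
 * hypothetical.
 *
 *      static void my_get_resv_regions(struct device *dev,
 *                                      struct list_head *head)
 *      {
 *              struct iommu_resv_region *region;
 *              int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 *
 *              region = iommu_alloc_resv_region(MY_MSI_IOVA_BASE,
 *                                               MY_MSI_IOVA_LENGTH, prot,
 *                                               IOMMU_RESV_SW_MSI);
 *              if (region)
 *                      list_add_tail(&region->list, head);
 *
 *              iommu_dma_get_resv_regions(dev, head);
 *      }
 */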
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
                phys_addr_t start, phys_addr_t end)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_dma_msi_page *msi_page;
        int i, num_pages;

        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

        for (i = 0; i < num_pages; i++) {
                msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
                if (!msi_page)
                        return -ENOMEM;

                msi_page->phys = start;
                msi_page->iova = start;
                INIT_LIST_HEAD(&msi_page->list);
                list_add(&msi_page->list, &cookie->msi_page_list);
                start += iovad->granule;
        }

        return 0;
}
static int iova_reserve_pci_windows(struct pci_dev *dev,
                struct iova_domain *iovad)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
        struct resource_entry *window;
        unsigned long lo, hi;
        phys_addr_t start = 0, end;

        resource_list_for_each_entry(window, &bridge->windows) {
                if (resource_type(window->res) != IORESOURCE_MEM)
                        continue;

                lo = iova_pfn(iovad, window->res->start - window->offset);
                hi = iova_pfn(iovad, window->res->end - window->offset);
                reserve_iova(iovad, lo, hi);
        }

        /* Get reserved DMA windows from host bridge */
        resource_list_for_each_entry(window, &bridge->dma_ranges) {
                end = window->res->start - window->offset;
resv_iova:
                if (end > start) {
                        lo = iova_pfn(iovad, start);
                        hi = iova_pfn(iovad, end);
                        reserve_iova(iovad, lo, hi);
                } else if (end < start) {
                        /* dma_ranges list should be sorted */
                        dev_err(&dev->dev, "Failed to reserve IOVA\n");
                        return -EINVAL;
                }

                start = window->res->end - window->offset + 1;
                /* If window is last entry */
                if (window->node.next == &bridge->dma_ranges &&
                    end != ~(phys_addr_t)0) {
                        end = ~(phys_addr_t)0;
                        goto resv_iova;
                }
        }

        return 0;
}
static int iova_reserve_iommu_regions(struct device *dev,
                struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);
        int ret = 0;

        if (dev_is_pci(dev)) {
                ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
                if (ret)
                        return ret;
        }

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;

                /* We ARE the software that manages these! */
                if (region->type == IOMMU_RESV_SW_MSI)
                        continue;

                lo = iova_pfn(iovad, region->start);
                hi = iova_pfn(iovad, region->start + region->length - 1);
                reserve_iova(iovad, lo, hi);

                if (region->type == IOMMU_RESV_MSI)
                        ret = cookie_init_hw_msi_region(cookie, region->start,
                                        region->start + region->length);
                if (ret)
                        break;
        }
        iommu_put_resv_regions(dev, &resv_regions);

        return ret;
}
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
        struct iommu_dma_cookie *cookie;
        struct iommu_domain *domain;

        cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
        domain = cookie->fq_domain;
        /*
         * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
         * implies that ops->flush_iotlb_all must be non-NULL.
         */
        domain->ops->flush_iotlb_all(domain);
}
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        unsigned long order, base_pfn;
        struct iova_domain *iovad;
        int attr;

        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;

        iovad = &cookie->iovad;

        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);

        /* Check the domain allows at least some access to the device... */
        if (domain->geometry.force_aperture) {
                if (base > domain->geometry.aperture_end ||
                    base + size <= domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
                /* ...then finally give it a kicking to make sure it fits */
                base_pfn = max_t(unsigned long, base_pfn,
                                domain->geometry.aperture_start >> order);
        }

        /* start_pfn is always nonzero for an already-initialised domain */
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
                    base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        return -EFAULT;
                }

                return 0;
        }

        init_iova_domain(iovad, 1UL << order, base_pfn);

        if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
                        DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
                if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
                                          NULL))
                        pr_warn("iova flush queue initialization failed\n");
                else
                        cookie->fq_domain = domain;
        }

        if (!dev)
                return 0;

        return iova_reserve_iommu_regions(dev, domain);
}
static int iommu_dma_deferred_attach(struct device *dev,
                struct iommu_domain *domain)
{
        const struct iommu_ops *ops = domain->ops;

        if (!is_kdump_kernel())
                return 0;

        if (unlikely(ops->is_attach_deferred &&
                        ops->is_attach_deferred(domain, dev)))
                return iommu_attach_device(domain, dev);

        return 0;
}
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}
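/*
 * For illustration (not part of the original file): a cache-coherent device
 * doing a DMA_TO_DEVICE mapping ends up with read-only, cacheable IOMMU
 * permissions, while a non-coherent device doing a privileged DMA_FROM_DEVICE
 * mapping becomes write-only and privileged:
 *
 *      dma_info_to_prot(DMA_TO_DEVICE, true, 0)
 *              == IOMMU_READ | IOMMU_CACHE
 *      dma_info_to_prot(DMA_FROM_DEVICE, false, DMA_ATTR_PRIVILEGED)
 *              == IOMMU_WRITE | IOMMU_PRIV
 */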
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                size_t size, u64 dma_limit, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long shift, iova_len, iova = 0;

        if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
                cookie->msi_iova += size;
                return cookie->msi_iova - size;
        }

        shift = iova_shift(iovad);
        iova_len = size >> shift;
        /*
         * Freeing non-power-of-two-sized allocations back into the IOVA caches
         * will come back to bite us badly, so we have to waste a bit of space
         * rounding up anything cacheable to make sure that can't happen. The
         * order of the unadjusted size will still match upon freeing.
         */
        if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
                iova_len = roundup_pow_of_two(iova_len);

        dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
                iova = alloc_iova_fast(iovad, iova_len,
                                       DMA_BIT_MASK(32) >> shift, false);

        if (!iova)
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
                                       true);

        return (dma_addr_t)iova << shift;
}
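/*
 * Worked example (illustrative, not part of the original file): with a 4KB
 * IOVA granule, a 24KB request gives iova_len = 6; that is below the rcache
 * limit, so it is rounded up to 8 granules and 32KB of IOVA space is
 * allocated, even though the caller will only map the 24KB it asked for.
 */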
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
                dma_addr_t iova, size_t size)
{
        struct iova_domain *iovad = &cookie->iovad;

        /* The MSI case is only ever cleaning up its most recent allocation */
        if (cookie->type == IOMMU_DMA_MSI_COOKIE)
                cookie->msi_iova -= size;
        else if (cookie->fq_domain)     /* non-strict mode */
                queue_iova(iovad, iova_pfn(iovad, iova),
                                size >> iova_shift(iovad), 0);
        else
                free_iova_fast(iovad, iova_pfn(iovad, iova),
                                size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
                size_t size)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, dma_addr);
        struct iommu_iotlb_gather iotlb_gather;
        size_t unmapped;

        dma_addr -= iova_off;
        size = iova_align(iovad, size + iova_off);
        iommu_iotlb_gather_init(&iotlb_gather);

        unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
        WARN_ON(unmapped != size);

        if (!cookie->fq_domain)
                iommu_iotlb_sync(domain, &iotlb_gather);
        iommu_dma_free_iova(cookie, dma_addr, size);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot, u64 dma_mask)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;

        if (unlikely(iommu_dma_deferred_attach(dev, domain)))
                return DMA_MAPPING_ERROR;

        size = iova_align(iovad, size + iova_off);

        iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
        if (!iova)
                return DMA_MAPPING_ERROR;

        if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
                iommu_dma_free_iova(cookie, iova, size);
                return DMA_MAPPING_ERROR;
        }
        return iova + iova_off;
}
static void __iommu_dma_free_pages(struct page **pages, int count)
{
        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}
static struct page **__iommu_dma_alloc_pages(struct device *dev,
                unsigned int count, unsigned long order_mask, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, nid = dev_to_node(dev);

        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;

        pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        /* IOMMU can map any pages, so highmem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

        /* It makes no sense to muck about with huge pages */
        gfp &= ~__GFP_COMP;

        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                /*
                 * Higher-order allocations are a convenience rather
                 * than a necessity, hence using __GFP_NORETRY until
                 * falling back to minimum-order allocations.
                 */
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);
                        gfp_t alloc_flags = gfp;

                        order_size = 1U << order;
                        if (order_mask > order_size)
                                alloc_flags |= __GFP_NORETRY;
                        page = alloc_pages_node(nid, alloc_flags, order);
                        if (!page)
                                continue;
                        if (order)
                                split_page(page, order);
                        break;
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }
        return pages;
}
/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *       attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
                unsigned long attrs)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t iova;
        void *vaddr;

        *dma_handle = DMA_MAPPING_ERROR;

        if (unlikely(iommu_dma_deferred_attach(dev, domain)))
                return NULL;

        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                min_size = PAGE_SIZE;
                alloc_sizes |= PAGE_SIZE;
        } else {
                size = ALIGN(size, min_size);
        }
        if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                alloc_sizes = min_size;

        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
                                        gfp);
        if (!pages)
                return NULL;

        size = iova_align(iovad, size);
        iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;

        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;

        if (!(ioprot & IOMMU_CACHE)) {
                struct scatterlist *sg;
                int i;

                for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
                        arch_dma_prep_coherent(sg_page(sg), sg->length);
        }

        if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
                        < size)
                goto out_free_sg;

        vaddr = dma_common_pages_remap(pages, size, prot,
                        __builtin_return_address(0));
        if (!vaddr)
                goto out_unmap;

        *dma_handle = iova;
        sg_free_table(&sgt);
        return vaddr;

out_unmap:
        __iommu_dma_unmap(dev, iova, size);
out_free_sg:
        sg_free_table(&sgt);
out_free_iova:
        iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
        __iommu_dma_free_pages(pages, count);
        return NULL;
}
/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
                struct vm_area_struct *vma)
{
        return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}
static void iommu_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
        arch_sync_dma_for_cpu(phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
        arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        bool coherent = dev_is_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dma_handle;

        dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            dma_handle != DMA_MAPPING_ERROR)
                arch_sync_dma_for_device(phys, size, dir);
        return dma_handle;
}
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
        __iommu_dma_unmap(dev, dma_handle, size);
}
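/*
 * Example (an illustrative sketch, not part of the original file): drivers
 * never call these entry points directly; an ordinary streaming mapping such
 * as the one below reaches them through the dma_map_ops installed by
 * iommu_setup_dma_ops(). "dev", "buf" and "len" stand for whatever the
 * driver has at hand.
 *
 *      dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      // ...device performs DMA reads from "dma"...
 *      dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */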
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                dma_addr_t dma_addr)
{
        struct scatterlist *s, *cur = sg;
        unsigned long seg_mask = dma_get_seg_boundary(dev);
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;

        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;

                s->offset += s_iova_off;
                s->length = s_length;
                sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;

                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
                 * - and this segment starts on an IOVA page boundary
                 * - but doesn't fall at a segment boundary
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
                    (max_len - cur_len >= s_length)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
                        /* Otherwise start the next output segment */
                        if (i > 0)
                                cur = sg_next(cur);
                        cur_len = s_length;
                        count++;

                        sg_dma_address(cur) = dma_addr + s_iova_off;
                }

                sg_dma_len(cur) = cur_len;
                dma_addr += s_iova_len;

                if (s_length + s_iova_off < s_iova_len)
                        cur_len = 0;
        }
        return count;
}
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (sg_dma_address(s) != DMA_MAPPING_ERROR)
                        s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
                sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;
        }
}
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct scatterlist *s, *prev = NULL;
        int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
        dma_addr_t iova;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;

        if (unlikely(iommu_dma_deferred_attach(dev, domain)))
                return 0;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
         * trickery we can modify the list in-place, but reversibly, by
         * stashing the unaligned parts in the as-yet-unused DMA fields.
         */
        for_each_sg(sg, s, nents, i) {
                size_t s_iova_off = iova_offset(iovad, s->offset);
                size_t s_length = s->length;
                size_t pad_len = (mask - iova_len + 1) & mask;

                sg_dma_address(s) = s_iova_off;
                sg_dma_len(s) = s_length;
                s->offset -= s_iova_off;
                s_length = iova_align(iovad, s_length + s_iova_off);
                s->length = s_length;

                /*
                 * Due to the alignment of our single IOVA allocation, we can
                 * depend on these assumptions about the segment boundary mask:
                 * - If mask size >= IOVA size, then the IOVA range cannot
                 *   possibly fall across a boundary, so we don't care.
                 * - If mask size < IOVA size, then the IOVA range must start
                 *   exactly on a boundary, therefore we can lay things out
                 *   based purely on segment lengths without needing to know
                 *   the actual addresses beforehand.
                 * - The mask must be a power of 2, so pad_len == 0 if
                 *   iova_len == 0, thus we cannot dereference prev the first
                 *   time through here (i.e. before it has a meaningful value).
                 */
                if (pad_len && pad_len < s_length - 1) {
                        prev->length += pad_len;
                        iova_len += pad_len;
                }

                iova_len += s_length;
                prev = s;
        }

        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova)
                goto out_restore_sg;

        /*
         * We'll leave any physical concatenation to the IOMMU driver's
         * implementation - it knows better than we do.
         */
        if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
                goto out_free_iova;

        return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
        iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
        __invalidate_sg(sg, nents);
        return 0;
}
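/*
 * Example (an illustrative sketch, not part of the original file): from the
 * caller's side this all stays behind the ordinary scatterlist DMA API;
 * "dev", "sgl" and "nents" come from the driver, and program_hw_desc() is a
 * made-up stand-in for its descriptor setup.
 *
 *      struct scatterlist *sg;
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *      if (!count)
 *              return -ENOMEM;
 *      for_each_sg(sgl, sg, count, i)
 *              program_hw_desc(sg_dma_address(sg), sg_dma_len(sg));
 *      // ...device writes into the buffer...
 *      dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */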
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t start, end;
        struct scatterlist *tmp;
        int i;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
         */
        start = sg_dma_address(sg);
        for_each_sg(sg_next(sg), tmp, nents - 1, i) {
                if (sg_dma_len(tmp) == 0)
                        break;
                sg = tmp;
        }
        end = sg_dma_address(sg) + sg_dma_len(sg);
        __iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return __iommu_dma_map(dev, phys, size,
                        dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
                        dma_get_mask(dev));
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(dev, handle, size);
}
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
        size_t alloc_size = PAGE_ALIGN(size);
        int count = alloc_size >> PAGE_SHIFT;
        struct page *page = NULL, **pages = NULL;

        /* Non-coherent atomic allocation? Easy */
        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_free_from_pool(dev, cpu_addr, alloc_size))
                return;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
                /*
                 * If the address is remapped, then it's either non-coherent
                 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
                 */
                pages = dma_common_find_pages(cpu_addr);
                if (!pages)
                        page = vmalloc_to_page(cpu_addr);
                dma_common_free_remap(cpu_addr, alloc_size);
        } else {
                /* Lowmem means a coherent atomic or CMA allocation */
                page = virt_to_page(cpu_addr);
        }

        if (pages)
                __iommu_dma_free_pages(pages, count);
        if (page)
                dma_free_contiguous(dev, page, alloc_size);
}
static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t handle, unsigned long attrs)
{
        __iommu_dma_unmap(dev, handle, size);
        __iommu_dma_free(dev, size, cpu_addr);
}
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
                struct page **pagep, gfp_t gfp, unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        size_t alloc_size = PAGE_ALIGN(size);
        int node = dev_to_node(dev);
        struct page *page = NULL;
        void *cpu_addr;

        page = dma_alloc_contiguous(dev, alloc_size, gfp);
        if (!page)
                page = alloc_pages_node(node, gfp, get_order(alloc_size));
        if (!page)
                return NULL;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
                pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

                cpu_addr = dma_common_contiguous_remap(page, alloc_size,
                                prot, __builtin_return_address(0));
                if (!cpu_addr)
                        goto out_free_pages;

                if (!coherent)
                        arch_dma_prep_coherent(page, size);
        } else {
                cpu_addr = page_address(page);
        }

        *pagep = page;
        memset(cpu_addr, 0, alloc_size);
        return cpu_addr;
out_free_pages:
        dma_free_contiguous(dev, page, alloc_size);
        return NULL;
}
static void *iommu_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        struct page *page = NULL;
        void *cpu_addr;

        gfp |= __GFP_ZERO;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
            !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
                return iommu_dma_alloc_remap(dev, size, handle, gfp,
                                dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
        }

        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !gfpflags_allow_blocking(gfp) && !coherent)
                page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
                                           gfp, NULL);
        else
                cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
        if (!cpu_addr)
                return NULL;

        *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
                        dev->coherent_dma_mask);
        if (*handle == DMA_MAPPING_ERROR) {
                __iommu_dma_free(dev, size, cpu_addr);
                return NULL;
        }

        return cpu_addr;
}
#ifdef CONFIG_DMA_REMAP
static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
{
        if (!gfpflags_allow_blocking(gfp)) {
                struct page *page;

                page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
                if (!page)
                        return NULL;
                return page_address(page);
        }

        return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
                                     PAGE_KERNEL, 0);
}

static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
{
        __iommu_dma_unmap(dev, handle, size);
        __iommu_dma_free(dev, size, cpu_addr);
}
#else
#define iommu_dma_alloc_noncoherent     NULL
#define iommu_dma_free_noncoherent      NULL
#endif /* CONFIG_DMA_REMAP */
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn, off = vma->vm_pgoff;
        int ret;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
                return -ENXIO;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
                struct page **pages = dma_common_find_pages(cpu_addr);

                if (pages)
                        return __iommu_dma_mmap(pages, size, vma);
                pfn = vmalloc_to_pfn(cpu_addr);
        } else {
                pfn = page_to_pfn(virt_to_page(cpu_addr));
        }

        return remap_pfn_range(vma, vma->vm_start, pfn + off,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
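/*
 * Example (an illustrative sketch, not part of the original file): a driver
 * exposing a coherent buffer to userspace would typically go through
 * dma_mmap_coherent(), which lands here via iommu_dma_ops. The my_mmap()
 * name and struct my_dev are hypothetical; vaddr/dma are assumed to come
 * from an earlier dma_alloc_coherent() of "size" bytes.
 *
 *      static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct my_dev *md = file->private_data;
 *
 *              return dma_mmap_coherent(md->dev, vma, md->vaddr, md->dma,
 *                                       md->size);
 *      }
 */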
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page;
        int ret;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
                struct page **pages = dma_common_find_pages(cpu_addr);

                if (pages) {
                        return sg_alloc_table_from_pages(sgt, pages,
                                        PAGE_ALIGN(size) >> PAGE_SHIFT,
                                        0, size, GFP_KERNEL);
                }

                page = vmalloc_to_page(cpu_addr);
        } else {
                page = virt_to_page(cpu_addr);
        }

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}
static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);

        return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
static const struct dma_map_ops iommu_dma_ops = {
        .alloc                  = iommu_dma_alloc,
        .free                   = iommu_dma_free,
        .alloc_pages            = dma_common_alloc_pages,
        .free_pages             = dma_common_free_pages,
        .alloc_noncoherent      = iommu_dma_alloc_noncoherent,
        .free_noncoherent       = iommu_dma_free_noncoherent,
        .mmap                   = iommu_dma_mmap,
        .get_sgtable            = iommu_dma_get_sgtable,
        .map_page               = iommu_dma_map_page,
        .unmap_page             = iommu_dma_unmap_page,
        .map_sg                 = iommu_dma_map_sg,
        .unmap_sg               = iommu_dma_unmap_sg,
        .sync_single_for_cpu    = iommu_dma_sync_single_for_cpu,
        .sync_single_for_device = iommu_dma_sync_single_for_device,
        .sync_sg_for_cpu        = iommu_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = iommu_dma_sync_sg_for_device,
        .map_resource           = iommu_dma_map_resource,
        .unmap_resource         = iommu_dma_unmap_resource,
        .get_merge_boundary     = iommu_dma_get_merge_boundary,
};
/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

        /*
         * The IOMMU core code allocates the default DMA domain, which the
         * underlying IOMMU driver needs to support via the dma-iommu layer.
         */
        if (domain->type == IOMMU_DOMAIN_DMA) {
                if (iommu_dma_init_domain(domain, dma_base, size, dev))
                        goto out_err;
                dev->dma_ops = &iommu_dma_ops;
        }

        return;
out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
}
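/*
 * Example (an illustrative sketch, not part of the original file): the arch
 * code is expected to call iommu_setup_dma_ops() once the device's DMA
 * window is known, roughly along the lines of arm64's arch_setup_dma_ops():
 *
 *      void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *                              const struct iommu_ops *iommu, bool coherent)
 *      {
 *              dev->dma_coherent = coherent;
 *              if (iommu)
 *                      iommu_setup_dma_ops(dev, dma_base, size);
 *      }
 */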
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                phys_addr_t msi_addr, struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi_page;
        dma_addr_t iova;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        size_t size = cookie_msi_granule(cookie);

        msi_addr &= ~(phys_addr_t)(size - 1);
        list_for_each_entry(msi_page, &cookie->msi_page_list, list)
                if (msi_page->phys == msi_addr)
                        return msi_page;

        msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
        if (!msi_page)
                return NULL;

        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
        if (!iova)
                goto out_free_page;

        if (iommu_map(domain, iova, msi_addr, size, prot))
                goto out_free_iova;

        INIT_LIST_HEAD(&msi_page->list);
        msi_page->phys = msi_addr;
        msi_page->iova = iova;
        list_add(&msi_page->list, &cookie->msi_page_list);
        return msi_page;

out_free_iova:
        iommu_dma_free_iova(cookie, iova, size);
out_free_page:
        kfree(msi_page);
        return NULL;
}
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
        struct device *dev = msi_desc_to_dev(desc);
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_msi_page *msi_page;
        static DEFINE_MUTEX(msi_prepare_lock); /* see below */

        if (!domain || !domain->iova_cookie) {
                desc->iommu_cookie = NULL;
                return 0;
        }

        /*
         * In fact the whole prepare operation should already be serialised by
         * irq_domain_mutex further up the callchain, but that's pretty subtle
         * on its own, so consider this locking as failsafe documentation...
         */
        mutex_lock(&msi_prepare_lock);
        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
        mutex_unlock(&msi_prepare_lock);

        msi_desc_set_iommu_cookie(desc, msi_page);

        if (!msi_page)
                return -ENOMEM;
        return 0;
}
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
                               struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(desc);
        const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        const struct iommu_dma_msi_page *msi_page;

        msi_page = msi_desc_get_iommu_cookie(desc);

        if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
                return;

        msg->address_hi = upper_32_bits(msi_page->iova);
        msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
        msg->address_lo += lower_32_bits(msi_page->iova);
}
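/*
 * Example (an illustrative sketch, not part of the original file): an MSI
 * irqchip driver wires these two helpers into its msi_prepare and
 * irq_compose_msi_msg paths, loosely following the GICv3 ITS pattern. The
 * my_*() names and my_doorbell_phys are hypothetical.
 *
 *      static int my_msi_prepare(struct irq_domain *d, struct device *dev,
 *                                int nvec, msi_alloc_info_t *info)
 *      {
 *              return iommu_dma_prepare_msi(info->desc, my_doorbell_phys);
 *      }
 *
 *      static void my_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 *      {
 *              msg->address_lo = lower_32_bits(my_doorbell_phys);
 *              msg->address_hi = upper_32_bits(my_doorbell_phys);
 *              msg->data = my_hwirq(d);
 *              iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
 *      }
 */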
static int iommu_dma_init(void)
{
        return iova_cache_get();
}
arch_initcall(iommu_dma_init);