// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_forcedac);

	if (!ret && iommu_dma_forcedac)
		pr_info("Forcing DAC for PCI devices\n");
	return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);

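/*
 * Example (usage note, not code from this file): booting with
 * "iommu.forcedac=1" on the kernel command line sets iommu_dma_forcedac,
 * which makes iommu_dma_alloc_iova() below skip its SAC-first policy and
 * allocate PCI IOVAs from the full DMA mask straight away.
 */
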
static void iommu_dma_entry_dtor(unsigned long data)
{
	struct page *freelist = (struct page *)data;

	while (freelist != NULL) {
		unsigned long p = (unsigned long)page_address(freelist);

		freelist = freelist->freelist;
		free_page(p);
	}
}

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

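/*
 * Illustrative sketch (assumption, not code from this file): a caller such as
 * VFIO that owns an unmanaged domain and manages its own IOVA space might do:
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	ret = iommu_get_msi_cookie(domain, resv_base);
 *
 * where "resv_base" is a hypothetical IOVA the caller has reserved for MSI
 * doorbells; iommu_dma_prepare_msi() then hands out PAGE_SIZE slots linearly
 * from that base.
 */
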
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

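/*
 * Illustrative sketch (assumption, not taken verbatim from any driver): an
 * IOMMU driver typically plugs the helper above straight into its ops, e.g.
 *
 *	static struct iommu_ops foo_iommu_ops = {
 *		...
 *		.get_resv_regions	= iommu_dma_get_resv_regions,
 *		...
 *	};
 */
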
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;

	domain->ops->flush_iotlb_all(domain);
}

static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

static bool dev_use_swiotlb(struct device *dev)
{
	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
}

/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	int ret;

	if (cookie->fq_domain)
		return 0;

	ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all,
				    iommu_dma_entry_dtor);
	if (ret) {
		pr_warn("iova flush queue initialization failed\n");
		return ret;
	}
	/*
	 * Prevent incomplete iovad->fq being observable. Pairs with path from
	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
	 */
	smp_wmb();
	WRITE_ONCE(cookie->fq_domain, domain);
	return 0;
}

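/*
 * Usage note (assumption about the sysfs interface, not code from this file):
 * the serialised sysfs update referred to above is the per-group default
 * domain type attribute, along the lines of
 *
 *	echo DMA-FQ > /sys/kernel/iommu_groups/<N>/type
 *
 * which switches an existing DMA domain over to flush-queue mode and ends up
 * calling iommu_dma_init_fq().
 */
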
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @limit: Last address of the IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
				 dma_addr_t limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    limit < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
		domain->type = IOMMU_DOMAIN_DMA;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

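/*
 * Worked example (illustrative): a streaming DMA_TO_DEVICE mapping on a
 * non-coherent device with no special attrs yields plain IOMMU_READ, while
 * the same mapping on a coherent device yields IOMMU_CACHE | IOMMU_READ.
 */
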
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (gather && gather->queued)
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				(unsigned long)gather->freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);
	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!iotlb_gather.queued)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
			< size)
		goto out_free_sg;

	sgt->sgl->dma_address = iova;
	sgt->sgl->dma_length = size;
	return pages;

out_free_sg:
	sg_free_table(sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct page **pages;
	struct sg_table sgt;
	void *vaddr;

	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
						attrs);
	if (!pages)
		return NULL;
	*dma_handle = sgt.sgl->dma_address;
	sg_free_table(&sgt);
	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, *dma_handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	return NULL;
}

#ifdef CONFIG_DMA_REMAP
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	struct dma_sgt_handle *sh;

	sh = kmalloc(sizeof(*sh), gfp);
	if (!sh)
		return NULL;

	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
						    PAGE_KERNEL, attrs);
	if (!sh->pages) {
		kfree(sh);
		return NULL;
	}
	return &sh->sgt;
}

static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	sg_free_table(&sh->sgt);
	kfree(sh);
}
#endif /* CONFIG_DMA_REMAP */

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_device(dev, phys, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
						      sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_device(dev,
							 sg_dma_address(sg),
							 sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova, dma_mask = dma_get_mask(dev);

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
		void *padding_start;
		size_t padding_size, aligned_size;

		aligned_size = iova_align(iovad, size);
		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
					      iova_mask(iovad), dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
			padding_start += size;
			padding_size -= size;
		}

		memset(padding_start, 0, padding_size);
	}

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);

	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
	return iova;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_handle);
	if (WARN_ON(!phys))
		return;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_handle, size);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		iommu_dma_unmap_page(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
				s->offset, s->length, dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	ssize_t ret;
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
		ret = iommu_deferred_attach(dev, domain);
		if (ret)
			goto out;
	}

	if (dev_use_swiotlb(dev))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova) {
		ret = -ENOMEM;
		goto out_restore_sg;
	}

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
	if (ret < 0 || ret < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
out:
	if (ret != -ENOMEM)
		return -EINVAL;
	return ret;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (dev_use_swiotlb(dev)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If it the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
#ifdef CONFIG_DMA_REMAP
	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
	.free_noncontiguous	= iommu_dma_free_noncontiguous,
#endif
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (iommu_is_dma_domain(domain)) {
		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);

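/*
 * Illustrative sketch (assumption about the caller, not code from this file):
 * architecture code invokes this once the IOMMU driver has probed the device,
 * roughly as arm64's arch_setup_dma_ops() does:
 *
 *	if (iommu)
 *		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 */
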
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

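/*
 * Illustrative flow (assumption, simplified): an MSI irqchip driver such as
 * the GICv3 ITS uses the two helpers above as a pair -
 *
 *	ret = iommu_dma_prepare_msi(desc, doorbell_phys);	(may sleep)
 *	...
 *	iommu_dma_compose_msi_msg(desc, msg);			(atomic context)
 *
 * so that the address programmed into the device is the remapped IOVA rather
 * than the physical doorbell whenever the domain has a cookie.
 */
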
static int iommu_dma_init(void)
{
	if (is_kdump_kernel())
		static_branch_enable(&iommu_deferred_attach_enabled);

	return iova_cache_get();
}
arch_initcall(iommu_dma_init);