// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */
#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};
enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};
struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};
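/*
 * When this key is enabled (currently only for kdump kernels, see
 * iommu_dma_init() at the bottom of this file), device attach is deferred
 * until the first DMA mapping, at which point iommu_deferred_attach() is
 * called before any IOVA is allocated.
 */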
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;

	free_cpu_cached_iovas(cpu, iovad);
}
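/*
 * Flush-queue entry destructor: walk the chain of no-longer-needed pagetable
 * pages that the IOMMU driver linked together via page->freelist at unmap
 * time, and give each one back to the page allocator.
 */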
static void iommu_dma_entry_dtor(unsigned long data)
{
	struct page *freelist = (struct page *)data;

	while (freelist) {
		unsigned long p = (unsigned long)page_address(freelist);

		freelist = freelist->freelist;
		free_page(p);
	}
}
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}
/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
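/*
 * Usage sketch (illustrative only, names are the caller's choice): a user of
 * an unmanaged domain that wants automatic MSI remapping might do
 *
 *	domain = iommu_domain_alloc(bus);
 *	ret = iommu_get_msi_cookie(domain, msi_iova_base);
 *
 * where msi_iova_base is the start of an IOVA region the caller promises
 * never to hand out for any other purpose.
 */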
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
					  iommu_dma_entry_dtor))
			pr_warn("iova flush queue initialization failed\n");
		else
			cookie->fq_domain = domain;
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
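/*
 * In non-strict (flush queue) mode, freed IOVAs are queued rather than
 * released immediately: they only become reusable once the deferred IOTLB
 * flush has run, at which point any pagetable pages chained on @freelist are
 * handed to iommu_dma_entry_dtor() for freeing.
 */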
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct page *freelist)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				(unsigned long)freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
}
static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_addr);
	if (WARN_ON(!phys))
		return;

	__iommu_dma_unmap(dev, dma_addr, size);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size,
				iova_align(iovad, size), dir, attrs);
}
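/*
 * "Untrusted" here means an externally-facing (e.g. Thunderbolt-attached) PCI
 * device. For such devices, mappings that are not granule-aligned are bounced
 * through swiotlb so that no unrelated kernel memory sharing the same IOVA
 * granule is ever exposed to the device.
 */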
static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}
static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
		size_t org_size, dma_addr_t dma_mask, bool coherent,
		enum dma_data_direction dir, unsigned long attrs)
{
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t aligned_size = org_size;
	void *padding_start;
	size_t padding_size;
	dma_addr_t iova;

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
	    iova_offset(iovad, phys | org_size)) {
		aligned_size = iova_align(iovad, org_size);
		phys = swiotlb_tbl_map_single(dev, phys, org_size,
					      aligned_size, dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE ||
		     dir == DMA_BIDIRECTIONAL)) {
			padding_start += org_size;
			padding_size -= org_size;
		}

		memset(padding_start, 0, padding_size);
	}

	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
	if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
		swiotlb_tbl_unmap_single(dev, phys, org_size,
				aligned_size, dir, attrs);
	return iova;
}
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
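/*
 * Allocate @count pages for a non-contiguous buffer, opportunistically trying
 * the block sizes permitted by @order_mask from largest to smallest, and
 * splitting whatever is obtained into order-0 pages for the returned array.
 */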
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
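/*
 * The cache maintenance below is skipped entirely for fully coherent devices;
 * the dev_is_untrusted() checks remain because an untrusted device's buffer
 * may have been bounced through swiotlb, and that bounce copy still needs
 * syncing regardless of coherency.
 */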
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(phys))
		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(phys))
		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	for_each_sg(sgl, sg, nelems, i) {
		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);

		if (is_swiotlb_buffer(sg_phys(sg)))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
						dir, SYNC_FOR_CPU);
	}
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	for_each_sg(sgl, sg, nelems, i) {
		if (is_swiotlb_buffer(sg_phys(sg)))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
						dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
	}
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
			coherent, dir, attrs);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
}
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}
static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}
static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
				s->length, dma_get_mask(dev),
				dev_is_dma_coherent(dev), dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	if (dev_is_untrusted(dev))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	if (dev_is_untrusted(dev)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}
static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}
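/*
 * Allocation strategy: with CONFIG_DMA_REMAP and a blocking @gfp, prefer a
 * physically non-contiguous buffer stitched together in IOVA space via
 * iommu_dma_alloc_remap(); for atomic non-coherent requests fall back to the
 * pre-remapped atomic pool; otherwise allocate physically contiguous memory.
 */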
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
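/*
 * These ops are installed per device by iommu_setup_dma_ops() below, once the
 * device is known to be attached to a DMA-API-managed (IOMMU_DOMAIN_DMA)
 * domain.
 */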
static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};
/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
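/*
 * Map a single MSI doorbell page into @domain, reusing an existing mapping if
 * this (granule-aligned) doorbell address has been seen before.
 */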
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
	kfree(msi_page);
	return NULL;
}
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
static int iommu_dma_init(void)
{
	if (is_kdump_kernel())
		static_branch_enable(&iommu_deferred_attach_enabled);

	return iova_cache_get();
}
arch_initcall(iommu_dma_init);