/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);

	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}
/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
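/*
 * Example of the expected call site (an illustrative sketch only; "struct
 * my_domain" and my_domain_alloc() are hypothetical driver names, not part
 * of this API):
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 */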
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;
	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
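/*
 * For example, a caller that runs its own IOVA allocator over an
 * IOMMU_DOMAIN_UNMANAGED domain might do the following (a minimal sketch;
 * the 0x8000000 base is an arbitrary illustrative value, and error
 * handling is elided):
 *
 *	domain = iommu_domain_alloc(&pci_bus_type);
 *	iommu_get_msi_cookie(domain, 0x8000000);
 *
 * having first excluded [0x8000000, 0x8000000 + SZ_1M) from its own
 * allocations, sized per the doorbell count as described above.
 */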
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (domain->iova_cookie)
		return -EEXIST;
	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;
	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
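/*
 * Example counterpart to the domain_alloc sketch above (again hypothetical
 * driver code, not part of this API):
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */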
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);
	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
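/*
 * A driver would typically chain this from its own callback before adding
 * any hardware-specific regions, e.g. (sketch; my_get_resv_regions() is a
 * hypothetical driver function):
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		iommu_dma_get_resv_regions(dev, head);
 *		... append driver-specific iommu_resv_region entries ...
 *	}
 */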
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}
	return 0;
}
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(dma_addr_t)0) {
			end = ~(dma_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
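/*
 * For example (illustrative values only): with a 4K IOMMU granule, passing
 * base = 0 and size = 1UL << 32 gives a cleanly-aligned 32-bit IOVA space,
 * whereas an unaligned base such as 0x1234 would be subject to the
 * granule-shift rounding below and is best avoided.
 */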
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
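/*
 * For instance, DMA_FROM_DEVICE means the device writes to memory, so it
 * translates to IOMMU_WRITE; a cache-coherent DMA_BIDIRECTIONAL mapping
 * would come back as IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE.
 */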
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
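	/*
	 * Worked example (assuming the length falls within the rcache range):
	 * a 5-granule request is rounded up to 8 granules here, and the
	 * unadjusted length later passed to free_iova_fast() still selects
	 * the same order-3 cache, keeping alloc and free consistent.
	 */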
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

static struct page **__iommu_dma_get_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !area->pages)
		return NULL;
	return area->pages;
}
/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;
	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;
	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;
	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;
	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
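/*
 * Concrete example (illustrative numbers): with a 4K granule, a segment of
 * 8000 bytes at page offset 0x800 is grown below into a granule-aligned
 * 0x3000-byte chunk starting at offset 0, while its original offset and
 * length are stashed in the as-yet-unused DMA fields and put back by
 * __finalise_sg() once the mapping has succeeded.
 */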
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
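		/*
		 * Worked example: given a 64K boundary (mask = 0xffff) and
		 * iova_len currently at 0x28000, pad_len works out to 0x8000;
		 * the previous segment is grown by that much so that this one
		 * starts exactly on the next 64K boundary.
		 */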
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}
		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = __iommu_dma_get_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page && !dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(alloc_size));
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t alloc_size = PAGE_ALIGN(size);
	struct page *page = NULL;
	void *cpu_addr;

	if (gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (!gfpflags_allow_blocking(gfp) && !coherent) {
		cpu_addr = dma_alloc_from_pool(alloc_size, &page, gfp);
		if (!cpu_addr)
			return NULL;

		*handle = __iommu_dma_map(dev, page_to_phys(page), size,
					  ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			dma_free_from_pool(cpu_addr, alloc_size);
			return NULL;
		}
		return cpu_addr;
	}

	if (gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
						 get_order(alloc_size),
						 gfp & __GFP_NOWARN);
	if (!page)
		page = alloc_pages(gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR)
		goto out_free_pages;

	if (!coherent || PageHighMem(page)) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				VM_USERMAP, prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_unmap;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;

out_unmap:
	__iommu_dma_unmap(dev, *handle, size);
out_free_pages:
	if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
		__free_pages(page, get_order(alloc_size));
	return NULL;
}
static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
	return ret;
}
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	struct page **pages;
	int ret;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (!is_vmalloc_addr(cpu_addr)) {
		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
		return __iommu_dma_mmap_pfn(vma, pfn, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __iommu_dma_mmap_pfn(vma, pfn, size);
	}

	pages = __iommu_dma_get_pages(cpu_addr);
	if (!pages)
		return -ENXIO;

	return __iommu_dma_mmap(pages, size, vma);
}
static int __iommu_dma_get_sgtable_page(struct sg_table *sgt, struct page *page,
		size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;

	if (!is_vmalloc_addr(cpu_addr)) {
		struct page *page = virt_to_page(cpu_addr);
		return __iommu_dma_get_sgtable_page(sgt, page, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __iommu_dma_get_sgtable_page(sgt, page, size);
	}

	pages = __iommu_dma_get_pages(cpu_addr);
	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}
static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
};
/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
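/*
 * Arch code is expected to call this after attaching the device to its
 * IOMMU, e.g. from an arch_setup_dma_ops() hook (a sketch; dma_base and
 * size here are whatever IOVA window the arch has chosen for the device):
 *
 *	iommu_setup_dma_ops(dev, dma_base, size);
 */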
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);