// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
struct iommu_dma_msi_page {
        struct list_head        list;
        dma_addr_t              iova;
        phys_addr_t             phys;
};

enum iommu_dma_cookie_type {
        IOMMU_DMA_IOVA_COOKIE,
        IOMMU_DMA_MSI_COOKIE,
};
struct iommu_dma_cookie {
        enum iommu_dma_cookie_type      type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct {
                        struct iova_domain      iovad;

                        struct iova_fq __percpu *fq;    /* Flush queue */
                        /* Number of TLB flushes that have been started */
                        atomic64_t              fq_flush_start_cnt;
                        /* Number of TLB flushes that have been finished */
                        atomic64_t              fq_flush_finish_cnt;
                        /* Timer to regularly empty the flush queues */
                        struct timer_list       fq_timer;
                        /* 1 when timer is active, 0 when not */
                        atomic_t                fq_timer_on;
                };
                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
                dma_addr_t              msi_iova;
        };
        struct list_head                msi_page_list;

        /* Domain for flush queue callback; NULL if flush queue not in use */
        struct iommu_domain             *fq_domain;
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{
        int ret = kstrtobool(str, &iommu_dma_forcedac);

        if (!ret && iommu_dma_forcedac)
                pr_info("Forcing DAC for PCI devices\n");
        return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
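/*
 * Example: the flag above is controlled from the kernel command line, so
 * booting with
 *
 *	iommu.forcedac=1
 *
 * makes iommu_dma_alloc_iova() below skip the 32-bit SAC attempt for PCI
 * devices and hand out full-width DAC addresses straight away.
 */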
/* Number of entries per flush queue */
#define IOVA_FQ_SIZE    256

/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_FQ_TIMEOUT 10

/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
        unsigned long iova_pfn;
        unsigned long pages;
        struct list_head freelist;
        u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU flush queue structure */
struct iova_fq {
        struct iova_fq_entry entries[IOVA_FQ_SIZE];
        unsigned int head, tail;
        spinlock_t lock;
};
#define fq_ring_for_each(i, fq) \
        for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
        assert_spin_locked(&fq->lock);
        return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}
static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
        unsigned int idx = fq->tail;

        assert_spin_locked(&fq->lock);
        fq->tail = (idx + 1) % IOVA_FQ_SIZE;
        return idx;
}
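/*
 * Ring arithmetic sketch: with IOVA_FQ_SIZE == 256, fq_ring_add() wraps the
 * tail modulo 256 and fq_full() reports full when (tail + 1) % 256 == head,
 * so each per-CPU queue holds at most 255 pending entries at a time.
 */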
static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
        u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
        unsigned int idx;

        assert_spin_locked(&fq->lock);

        fq_ring_for_each(idx, fq) {
                if (fq->entries[idx].counter >= counter)
                        break;

                put_pages_list(&fq->entries[idx].freelist);
                free_iova_fast(&cookie->iovad,
                               fq->entries[idx].iova_pfn,
                               fq->entries[idx].pages);

                fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
        }
}
static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
        atomic64_inc(&cookie->fq_flush_start_cnt);
        cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
        atomic64_inc(&cookie->fq_flush_finish_cnt);
}
static void fq_flush_timeout(struct timer_list *t)
{
        struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
        int cpu;

        atomic_set(&cookie->fq_timer_on, 0);
        fq_flush_iotlb(cookie);

        for_each_possible_cpu(cpu) {
                unsigned long flags;
                struct iova_fq *fq;

                fq = per_cpu_ptr(cookie->fq, cpu);
                spin_lock_irqsave(&fq->lock, flags);
                fq_ring_free(cookie, fq);
                spin_unlock_irqrestore(&fq->lock, flags);
        }
}
static void queue_iova(struct iommu_dma_cookie *cookie,
                unsigned long pfn, unsigned long pages,
                struct list_head *freelist)
{
        struct iova_fq *fq;
        unsigned long flags;
        unsigned int idx;

        /*
         * Order against the IOMMU driver's pagetable update from unmapping
         * @pte, to guarantee that fq_flush_iotlb() observes that if called
         * from a different CPU before we release the lock below. Full barrier
         * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
         * written fq state here.
         */
        smp_mb();

        fq = raw_cpu_ptr(cookie->fq);
        spin_lock_irqsave(&fq->lock, flags);

        /*
         * First remove all entries from the flush queue that have already been
         * flushed out on another CPU. This makes the fq_full() check below less
         * likely to be true.
         */
        fq_ring_free(cookie, fq);

        if (fq_full(fq)) {
                fq_flush_iotlb(cookie);
                fq_ring_free(cookie, fq);
        }

        idx = fq_ring_add(fq);

        fq->entries[idx].iova_pfn = pfn;
        fq->entries[idx].pages    = pages;
        fq->entries[idx].counter  = atomic64_read(&cookie->fq_flush_start_cnt);
        list_splice(freelist, &fq->entries[idx].freelist);

        spin_unlock_irqrestore(&fq->lock, flags);

        /* Avoid false sharing as much as possible. */
        if (!atomic_read(&cookie->fq_timer_on) &&
            !atomic_xchg(&cookie->fq_timer_on, 1))
                mod_timer(&cookie->fq_timer,
                          jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
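/*
 * Worked example of the counters used above: an entry queued while
 * fq_flush_start_cnt == 3 is stamped with counter = 3, and fq_ring_free()
 * only releases it once fq_flush_finish_cnt has reached 4, i.e. once a TLB
 * flush that started after the entry was queued has also finished.
 */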
static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{
        int cpu, idx;

        if (!cookie->fq_domain)
                return;

        del_timer_sync(&cookie->fq_timer);
        /* The IOVAs will be torn down separately, so just free our queued pages */
        for_each_possible_cpu(cpu) {
                struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);

                fq_ring_for_each(idx, fq)
                        put_pages_list(&fq->entries[idx].freelist);
        }

        free_percpu(cookie->fq);
}
/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_fq __percpu *queue;
        int i, cpu;

        if (cookie->fq_domain)
                return 0;

        atomic64_set(&cookie->fq_flush_start_cnt,  0);
        atomic64_set(&cookie->fq_flush_finish_cnt, 0);

        queue = alloc_percpu(struct iova_fq);
        if (!queue) {
                pr_warn("iova flush queue initialization failed\n");
                return -ENOMEM;
        }

        for_each_possible_cpu(cpu) {
                struct iova_fq *fq = per_cpu_ptr(queue, cpu);

                fq->head = 0;
                fq->tail = 0;
                spin_lock_init(&fq->lock);

                for (i = 0; i < IOVA_FQ_SIZE; i++)
                        INIT_LIST_HEAD(&fq->entries[i].freelist);
        }

        cookie->fq = queue;

        timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
        atomic_set(&cookie->fq_timer_on, 0);
        /*
         * Prevent incomplete fq state being observable. Pairs with path from
         * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
         */
        smp_wmb();
        WRITE_ONCE(cookie->fq_domain, domain);
        return 0;
}
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return cookie->iovad.granule;
        return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie *cookie;

        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
                INIT_LIST_HEAD(&cookie->msi_page_list);
                cookie->type = type;
        }
        return cookie;
}
/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
        if (domain->iova_cookie)
                return -EEXIST;

        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
        if (!domain->iova_cookie)
                return -ENOMEM;
        return 0;
}
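/*
 * Illustrative call site (a sketch only; the exact core function differs
 * between kernel versions): the IOMMU core attaches a cookie when it sets up
 * a DMA-API-managed default domain, roughly:
 *
 *	domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
 *	if (domain && iommu_get_dma_cookie(domain))
 *		... free the domain and fail ...
 */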
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
        struct iommu_dma_cookie *cookie;

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        if (domain->iova_cookie)
                return -EEXIST;

        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;

        cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
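/*
 * Minimal usage sketch for an MSI-only cookie (illustrative; MSI_IOVA_BASE
 * and the error handling are placeholders chosen by the caller, not defined
 * here):
 *
 *	struct iommu_domain *d = iommu_domain_alloc(dev->bus);
 *	if (d && iommu_get_msi_cookie(d, MSI_IOVA_BASE))
 *		... free the domain and bail out ...
 */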
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi, *tmp;

        if (!cookie)
                return;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
                iommu_dma_free_fq(cookie);
                put_iova_domain(&cookie->iovad);
        }

        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
                list_del(&msi->list);
                kfree(msi);
        }
        kfree(cookie);
        domain->iova_cookie = NULL;
}
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation as well.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
        if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
                iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
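/*
 * Typical use (sketch): an IOMMU driver either points its .get_resv_regions
 * callback straight at this helper, or calls it at the end of its own
 * callback after adding driver-specific regions, e.g. (driver name made up):
 *
 *	static void foo_get_resv_regions(struct device *dev,
 *					 struct list_head *head)
 *	{
 *		... add a driver-specific IOMMU_RESV_SW_MSI region ...
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */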
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
                phys_addr_t start, phys_addr_t end)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_dma_msi_page *msi_page;
        int i, num_pages;

        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

        for (i = 0; i < num_pages; i++) {
                msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
                if (!msi_page)
                        return -ENOMEM;

                msi_page->phys = start;
                msi_page->iova = start;
                INIT_LIST_HEAD(&msi_page->list);
                list_add(&msi_page->list, &cookie->msi_page_list);
                start += iovad->granule;
        }

        return 0;
}
static int iova_reserve_pci_windows(struct pci_dev *dev,
                struct iova_domain *iovad)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
        struct resource_entry *window;
        unsigned long lo, hi;
        phys_addr_t start = 0, end;

        resource_list_for_each_entry(window, &bridge->windows) {
                if (resource_type(window->res) != IORESOURCE_MEM)
                        continue;

                lo = iova_pfn(iovad, window->res->start - window->offset);
                hi = iova_pfn(iovad, window->res->end - window->offset);
                reserve_iova(iovad, lo, hi);
        }

        /* Get reserved DMA windows from host bridge */
        resource_list_for_each_entry(window, &bridge->dma_ranges) {
                end = window->res->start - window->offset;
resv_iova:
                if (end > start) {
                        lo = iova_pfn(iovad, start);
                        hi = iova_pfn(iovad, end);
                        reserve_iova(iovad, lo, hi);
                } else if (end < start) {
                        /* dma_ranges list should be sorted */
                        dev_err(&dev->dev,
                                "Failed to reserve IOVA [%pa-%pa]\n",
                                &start, &end);
                        return -EINVAL;
                }

                start = window->res->end - window->offset + 1;
                /* If window is last entry */
                if (window->node.next == &bridge->dma_ranges &&
                    end != ~(phys_addr_t)0) {
                        end = ~(phys_addr_t)0;
                        goto resv_iova;
                }
        }

        return 0;
}
static int iova_reserve_iommu_regions(struct device *dev,
                struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);
        int ret = 0;

        if (dev_is_pci(dev)) {
                ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
                if (ret)
                        return ret;
        }

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;

                /* We ARE the software that manages these! */
                if (region->type == IOMMU_RESV_SW_MSI)
                        continue;

                lo = iova_pfn(iovad, region->start);
                hi = iova_pfn(iovad, region->start + region->length - 1);
                reserve_iova(iovad, lo, hi);

                if (region->type == IOMMU_RESV_MSI)
                        ret = cookie_init_hw_msi_region(cookie, region->start,
                                        region->start + region->length);
                if (ret)
                        break;
        }
        iommu_put_resv_regions(dev, &resv_regions);

        return ret;
}
static bool dev_is_untrusted(struct device *dev)
{
        return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

static bool dev_use_swiotlb(struct device *dev)
{
        return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
}
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @limit: Last address of the IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                                 dma_addr_t limit, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        unsigned long order, base_pfn;
        struct iova_domain *iovad;
        int ret;

        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;

        iovad = &cookie->iovad;

        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);

        /* Check the domain allows at least some access to the device... */
        if (domain->geometry.force_aperture) {
                if (base > domain->geometry.aperture_end ||
                    limit < domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
                /* ...then finally give it a kicking to make sure it fits */
                base_pfn = max_t(unsigned long, base_pfn,
                                domain->geometry.aperture_start >> order);
        }

        /* start_pfn is always nonzero for an already-initialised domain */
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
                    base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        return -EFAULT;
                }
                return 0;
        }

        init_iova_domain(iovad, 1UL << order, base_pfn);
        ret = iova_domain_init_rcaches(iovad);
        if (ret)
                return ret;

        /* If the FQ fails we can simply fall back to strict mode */
        if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
                domain->type = IOMMU_DOMAIN_DMA;

        return iova_reserve_iommu_regions(dev, domain);
}
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                     unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}
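/*
 * Worked examples of the mapping above: a non-coherent DMA_TO_DEVICE mapping
 * yields IOMMU_READ, a coherent DMA_FROM_DEVICE mapping yields
 * IOMMU_CACHE | IOMMU_WRITE, and DMA_BIDIRECTIONAL adds both IOMMU_READ and
 * IOMMU_WRITE to whatever IOMMU_CACHE/IOMMU_PRIV bits were already set.
 */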
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                size_t size, u64 dma_limit, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long shift, iova_len, iova = 0;

        if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
                cookie->msi_iova += size;
                return cookie->msi_iova - size;
        }

        shift = iova_shift(iovad);
        iova_len = size >> shift;

        dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
                iova = alloc_iova_fast(iovad, iova_len,
                                       DMA_BIT_MASK(32) >> shift, false);

        if (!iova)
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
                                       true);

        return (dma_addr_t)iova << shift;
}
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
                dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
        struct iova_domain *iovad = &cookie->iovad;

        /* The MSI case is only ever cleaning up its most recent allocation */
        if (cookie->type == IOMMU_DMA_MSI_COOKIE)
                cookie->msi_iova -= size;
        else if (gather && gather->queued)
                queue_iova(cookie, iova_pfn(iovad, iova),
                           size >> iova_shift(iovad),
                           &gather->freelist);
        else
                free_iova_fast(iovad, iova_pfn(iovad, iova),
                               size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
                size_t size)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, dma_addr);
        struct iommu_iotlb_gather iotlb_gather;
        size_t unmapped;

        dma_addr -= iova_off;
        size = iova_align(iovad, size + iova_off);
        iommu_iotlb_gather_init(&iotlb_gather);
        iotlb_gather.queued = READ_ONCE(cookie->fq_domain);

        unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
        WARN_ON(unmapped != size);

        if (!iotlb_gather.queued)
                iommu_iotlb_sync(domain, &iotlb_gather);
        iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot, u64 dma_mask)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;

        if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
            iommu_deferred_attach(dev, domain))
                return DMA_MAPPING_ERROR;

        size = iova_align(iovad, size + iova_off);

        iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
        if (!iova)
                return DMA_MAPPING_ERROR;

        if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
                iommu_dma_free_iova(cookie, iova, size, NULL);
                return DMA_MAPPING_ERROR;
        }
        return iova + iova_off;
}
static void __iommu_dma_free_pages(struct page **pages, int count)
{
        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
                unsigned int count, unsigned long order_mask, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, nid = dev_to_node(dev);

        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;

        pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        /* IOMMU can map any pages, so highmem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

        /* It makes no sense to muck about with huge pages */
        gfp &= ~__GFP_COMP;

        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                /*
                 * Higher-order allocations are a convenience rather
                 * than a necessity, hence using __GFP_NORETRY until
                 * falling back to minimum-order allocations.
                 */
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);
                        gfp_t alloc_flags = gfp;

                        order_size = 1U << order;
                        if (order_mask > order_size)
                                alloc_flags |= __GFP_NORETRY;
                        page = alloc_pages_node(nid, alloc_flags, order);
                        if (!page)
                                continue;
                        if (order)
                                split_page(page, order);
                        break;
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }
        return pages;
}
/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
                size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
                unsigned long attrs)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
        struct page **pages;
        dma_addr_t iova;

        if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
            iommu_deferred_attach(dev, domain))
                return NULL;

        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                min_size = PAGE_SIZE;
                alloc_sizes |= PAGE_SIZE;
        } else {
                size = ALIGN(size, min_size);
        }
        if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                alloc_sizes = min_size;

        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
                                        gfp);
        if (!pages)
                return NULL;

        size = iova_align(iovad, size);
        iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;

        if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;

        if (!(ioprot & IOMMU_CACHE)) {
                struct scatterlist *sg;
                int i;

                for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
                        arch_dma_prep_coherent(sg_page(sg), sg->length);
        }

        if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
                        < size)
                goto out_free_sg;

        sgt->sgl->dma_address = iova;
        sgt->sgl->dma_length = size;
        return pages;

out_free_sg:
        sg_free_table(sgt);
out_free_iova:
        iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
        __iommu_dma_free_pages(pages, count);
        return NULL;
}
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
                unsigned long attrs)
{
        struct page **pages;
        struct sg_table sgt;
        void *vaddr;

        pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
                                                attrs);
        if (!pages)
                return NULL;
        *dma_handle = sgt.sgl->dma_address;
        sg_free_table(&sgt);
        vaddr = dma_common_pages_remap(pages, size, prot,
                        __builtin_return_address(0));
        if (!vaddr)
                goto out_unmap;
        return vaddr;

out_unmap:
        __iommu_dma_unmap(dev, *dma_handle, size);
        __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        return NULL;
}
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
                size_t size, enum dma_data_direction dir, gfp_t gfp,
                unsigned long attrs)
{
        struct dma_sgt_handle *sh;

        sh = kmalloc(sizeof(*sh), gfp);
        if (!sh)
                return NULL;

        sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
                                                    PAGE_KERNEL, attrs);
        if (!sh->pages) {
                kfree(sh);
                return NULL;
        }
        return &sh->sgt;
}
static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        struct dma_sgt_handle *sh = sgt_handle(sgt);

        __iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
        __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        sg_free_table(&sh->sgt);
        kfree(sh);
}
static void iommu_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_cpu(phys, size, dir);

        if (is_swiotlb_buffer(dev, phys))
                swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
        if (is_swiotlb_buffer(dev, phys))
                swiotlb_sync_single_for_device(dev, phys, size, dir);

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_use_swiotlb(dev))
                for_each_sg(sgl, sg, nelems, i)
                        iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
                                                      sg->length, dir);
        else if (!dev_is_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_use_swiotlb(dev))
                for_each_sg(sgl, sg, nelems, i)
                        iommu_dma_sync_single_for_device(dev,
                                                         sg_dma_address(sg),
                                                         sg->length, dir);
        else if (!dev_is_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
              unsigned long offset, size_t size, enum dma_data_direction dir,
              unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        bool coherent = dev_is_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        dma_addr_t iova, dma_mask = dma_get_mask(dev);

        /*
         * If both the physical buffer start address and size are
         * page aligned, we don't need to use a bounce page.
         */
        if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
                void *padding_start;
                size_t padding_size, aligned_size;

                aligned_size = iova_align(iovad, size);
                phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
                                              iova_mask(iovad), dir, attrs);

                if (phys == DMA_MAPPING_ERROR)
                        return DMA_MAPPING_ERROR;

                /* Cleanup the padding area. */
                padding_start = phys_to_virt(phys);
                padding_size = aligned_size;

                if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
                    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
                        padding_start += size;
                        padding_size -= size;
                }

                memset(padding_start, 0, padding_size);
        }

        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(phys, size, dir);

        iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
        if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
                swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
        return iova;
}
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        phys_addr_t phys;

        phys = iommu_iova_to_phys(domain, dma_handle);
        if (WARN_ON(!phys))
                return;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
                arch_sync_dma_for_cpu(phys, size, dir);

        __iommu_dma_unmap(dev, dma_handle, size);

        if (unlikely(is_swiotlb_buffer(dev, phys)))
                swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                dma_addr_t dma_addr)
{
        struct scatterlist *s, *cur = sg;
        unsigned long seg_mask = dma_get_seg_boundary(dev);
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;

        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;

                s->offset += s_iova_off;
                s->length = s_length;
                sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;

                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
                 * - and this segment starts on an IOVA page boundary
                 * - but doesn't fall at a segment boundary
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
                    (max_len - cur_len >= s_length)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
                        /* Otherwise start the next output segment */
                        if (i > 0)
                                cur = sg_next(cur);
                        cur_len = s_length;
                        count++;

                        sg_dma_address(cur) = dma_addr + s_iova_off;
                }

                sg_dma_len(cur) = cur_len;
                dma_addr += s_iova_len;

                if (s_length + s_iova_off < s_iova_len)
                        cur_len = 0;
        }
        return count;
}
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (sg_dma_address(s) != DMA_MAPPING_ERROR)
                        s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
                sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;
        }
}
static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                iommu_dma_unmap_page(dev, sg_dma_address(s),
                                sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
                                s->offset, s->length, dir, attrs);
                if (sg_dma_address(s) == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(s) = s->length;
        }

        return nents;

out_unmap:
        iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        return -EIO;
}
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct scatterlist *s, *prev = NULL;
        int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
        dma_addr_t iova;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
        ssize_t ret;
        int i;

        if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
                ret = iommu_deferred_attach(dev, domain);
                if (ret)
                        goto out;
        }

        if (dev_use_swiotlb(dev))
                return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
         * trickery we can modify the list in-place, but reversibly, by
         * stashing the unaligned parts in the as-yet-unused DMA fields.
         */
        for_each_sg(sg, s, nents, i) {
                size_t s_iova_off = iova_offset(iovad, s->offset);
                size_t s_length = s->length;
                size_t pad_len = (mask - iova_len + 1) & mask;

                sg_dma_address(s) = s_iova_off;
                sg_dma_len(s) = s_length;
                s->offset -= s_iova_off;
                s_length = iova_align(iovad, s_length + s_iova_off);
                s->length = s_length;

                /*
                 * Due to the alignment of our single IOVA allocation, we can
                 * depend on these assumptions about the segment boundary mask:
                 * - If mask size >= IOVA size, then the IOVA range cannot
                 *   possibly fall across a boundary, so we don't care.
                 * - If mask size < IOVA size, then the IOVA range must start
                 *   exactly on a boundary, therefore we can lay things out
                 *   based purely on segment lengths without needing to know
                 *   the actual addresses beforehand.
                 * - The mask must be a power of 2, so pad_len == 0 if
                 *   iova_len == 0, thus we cannot dereference prev the first
                 *   time through here (i.e. before it has a meaningful value).
                 */
                if (pad_len && pad_len < s_length - 1) {
                        prev->length += pad_len;
                        iova_len += pad_len;
                }

                iova_len += s_length;
                prev = s;
        }

        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova) {
                ret = -ENOMEM;
                goto out_restore_sg;
        }

        /*
         * We'll leave any physical concatenation to the IOMMU driver's
         * implementation - it knows better than we do.
         */
        ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
        if (ret < 0 || ret < iova_len)
                goto out_free_iova;

        return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
        iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
        __invalidate_sg(sg, nents);
out:
        if (ret != -ENOMEM)
                return -EINVAL;
        return ret;
}
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t start, end;
        struct scatterlist *tmp;
        int i;

        if (dev_use_swiotlb(dev)) {
                iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
                return;
        }

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
         */
        start = sg_dma_address(sg);
        for_each_sg(sg_next(sg), tmp, nents - 1, i) {
                if (sg_dma_len(tmp) == 0)
                        break;
                sg = tmp;
        }
        end = sg_dma_address(sg) + sg_dma_len(sg);
        __iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return __iommu_dma_map(dev, phys, size,
                        dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
                        dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(dev, handle, size);
}
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
        size_t alloc_size = PAGE_ALIGN(size);
        int count = alloc_size >> PAGE_SHIFT;
        struct page *page = NULL, **pages = NULL;

        /* Non-coherent atomic allocation? Easy */
        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_free_from_pool(dev, cpu_addr, alloc_size))
                return;

        if (is_vmalloc_addr(cpu_addr)) {
                /*
                 * If the address is remapped, then it's either non-coherent
                 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
                 */
                pages = dma_common_find_pages(cpu_addr);
                if (!pages)
                        page = vmalloc_to_page(cpu_addr);
                dma_common_free_remap(cpu_addr, alloc_size);
        } else {
                /* Lowmem means a coherent atomic or CMA allocation */
                page = virt_to_page(cpu_addr);
        }

        if (pages)
                __iommu_dma_free_pages(pages, count);
        if (page)
                dma_free_contiguous(dev, page, alloc_size);
}
static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t handle, unsigned long attrs)
{
        __iommu_dma_unmap(dev, handle, size);
        __iommu_dma_free(dev, size, cpu_addr);
}
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
                struct page **pagep, gfp_t gfp, unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        size_t alloc_size = PAGE_ALIGN(size);
        int node = dev_to_node(dev);
        struct page *page = NULL;
        void *cpu_addr;

        page = dma_alloc_contiguous(dev, alloc_size, gfp);
        if (!page)
                page = alloc_pages_node(node, gfp, get_order(alloc_size));
        if (!page)
                return NULL;

        if (!coherent || PageHighMem(page)) {
                pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

                cpu_addr = dma_common_contiguous_remap(page, alloc_size,
                                prot, __builtin_return_address(0));
                if (!cpu_addr)
                        goto out_free_pages;

                if (!coherent)
                        arch_dma_prep_coherent(page, size);
        } else {
                cpu_addr = page_address(page);
        }

        *pagep = page;
        memset(cpu_addr, 0, alloc_size);
        return cpu_addr;
out_free_pages:
        dma_free_contiguous(dev, page, alloc_size);
        return NULL;
}
static void *iommu_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        struct page *page = NULL;
        void *cpu_addr;

        gfp |= __GFP_ZERO;

        if (gfpflags_allow_blocking(gfp) &&
            !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
                return iommu_dma_alloc_remap(dev, size, handle, gfp,
                                dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
        }

        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !gfpflags_allow_blocking(gfp) && !coherent)
                page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
                                               gfp, NULL);
        else
                cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
        if (!cpu_addr)
                return NULL;

        *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
                        dev->coherent_dma_mask);
        if (*handle == DMA_MAPPING_ERROR) {
                __iommu_dma_free(dev, size, cpu_addr);
                return NULL;
        }

        return cpu_addr;
}
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn, off = vma->vm_pgoff;
        int ret;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
                return -ENXIO;

        if (is_vmalloc_addr(cpu_addr)) {
                struct page **pages = dma_common_find_pages(cpu_addr);

                if (pages)
                        return vm_map_pages(vma, pages, nr_pages);
                pfn = vmalloc_to_pfn(cpu_addr);
        } else {
                pfn = page_to_pfn(virt_to_page(cpu_addr));
        }

        return remap_pfn_range(vma, vma->vm_start, pfn + off,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page;
        int ret;

        if (is_vmalloc_addr(cpu_addr)) {
                struct page **pages = dma_common_find_pages(cpu_addr);

                if (pages) {
                        return sg_alloc_table_from_pages(sgt, pages,
                                        PAGE_ALIGN(size) >> PAGE_SHIFT,
                                        0, size, GFP_KERNEL);
                }

                page = vmalloc_to_page(cpu_addr);
        } else {
                page = virt_to_page(cpu_addr);
        }

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}
static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);

        return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
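/*
 * E.g. for a domain whose smallest supported page size is SZ_4K, the merge
 * boundary above is (1UL << 12) - 1 = 0xfff, which callers such as the block
 * layer use as a virt boundary mask when deciding whether segments may be
 * merged.
 */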
static const struct dma_map_ops iommu_dma_ops = {
        .alloc                  = iommu_dma_alloc,
        .free                   = iommu_dma_free,
        .alloc_pages            = dma_common_alloc_pages,
        .free_pages             = dma_common_free_pages,
        .alloc_noncontiguous    = iommu_dma_alloc_noncontiguous,
        .free_noncontiguous     = iommu_dma_free_noncontiguous,
        .mmap                   = iommu_dma_mmap,
        .get_sgtable            = iommu_dma_get_sgtable,
        .map_page               = iommu_dma_map_page,
        .unmap_page             = iommu_dma_unmap_page,
        .map_sg                 = iommu_dma_map_sg,
        .unmap_sg               = iommu_dma_unmap_sg,
        .sync_single_for_cpu    = iommu_dma_sync_single_for_cpu,
        .sync_single_for_device = iommu_dma_sync_single_for_device,
        .sync_sg_for_cpu        = iommu_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = iommu_dma_sync_sg_for_device,
        .map_resource           = iommu_dma_map_resource,
        .unmap_resource         = iommu_dma_unmap_resource,
        .get_merge_boundary     = iommu_dma_get_merge_boundary,
};
/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

        /*
         * The IOMMU core code allocates the default DMA domain, which the
         * underlying IOMMU driver needs to support via the dma-iommu layer.
         */
        if (iommu_is_dma_domain(domain)) {
                if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
                        goto out_err;
                dev->dma_ops = &iommu_dma_ops;
        }

        return;
out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
}
EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);
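/*
 * Sketch of the expected caller (based on arch code such as arm64's
 * arch_setup_dma_ops(); parameter names illustrative):
 *
 *	if (iommu)
 *		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 *
 * i.e. @dma_limit is the last usable address, not the size of the range.
 */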
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                phys_addr_t msi_addr, struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi_page;
        dma_addr_t iova;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        size_t size = cookie_msi_granule(cookie);

        msi_addr &= ~(phys_addr_t)(size - 1);
        list_for_each_entry(msi_page, &cookie->msi_page_list, list)
                if (msi_page->phys == msi_addr)
                        return msi_page;

        msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
        if (!msi_page)
                return NULL;

        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
        if (!iova)
                goto out_free_page;

        if (iommu_map(domain, iova, msi_addr, size, prot))
                goto out_free_iova;

        INIT_LIST_HEAD(&msi_page->list);
        msi_page->phys = msi_addr;
        msi_page->iova = iova;
        list_add(&msi_page->list, &cookie->msi_page_list);
        return msi_page;

out_free_iova:
        iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
        kfree(msi_page);
        return NULL;
}
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
        struct device *dev = msi_desc_to_dev(desc);
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_msi_page *msi_page;
        static DEFINE_MUTEX(msi_prepare_lock); /* see below */

        if (!domain || !domain->iova_cookie) {
                desc->iommu_cookie = NULL;
                return 0;
        }

        /*
         * In fact the whole prepare operation should already be serialised by
         * irq_domain_mutex further up the callchain, but that's pretty subtle
         * on its own, so consider this locking as failsafe documentation...
         */
        mutex_lock(&msi_prepare_lock);
        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
        mutex_unlock(&msi_prepare_lock);

        msi_desc_set_iommu_cookie(desc, msi_page);

        if (!msi_page)
                return -ENOMEM;
        return 0;
}
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
                               struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(desc);
        const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        const struct iommu_dma_msi_page *msi_page;

        msi_page = msi_desc_get_iommu_cookie(desc);

        if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
                return;

        msg->address_hi = upper_32_bits(msi_page->iova);
        msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
        msg->address_lo += lower_32_bits(msi_page->iova);
}
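/*
 * Worked example for the composition above: with a 4K MSI granule, the low
 * 12 bits of the original doorbell address are kept in address_lo while the
 * remaining bits are replaced by the IOVA, and address_hi becomes the upper
 * 32 bits of that IOVA.
 */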
static int iommu_dma_init(void)
{
        if (is_kdump_kernel())
                static_branch_enable(&iommu_deferred_attach_enabled);

        return iova_cache_get();
}
arch_initcall(iommu_dma_init);