// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *	       and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_DEBUGFS
static int iommu_debugfs_weight_get(void *data, u64 *val)
{
	struct iommu_table *tbl = data;

	*val = bitmap_weight(tbl->it_map, tbl->it_size);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");

static void iommu_debugfs_add(struct iommu_table *tbl)
{
	char name[10];
	struct dentry *liobn_entry;

	sprintf(name, "%08lx", tbl->it_index);
	liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);

	debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
	debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
	debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
	debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
	debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
	debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
	debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
}

static void iommu_debugfs_del(struct iommu_table *tbl)
{
	char name[10];
	struct dentry *liobn_entry;

	sprintf(name, "%08lx", tbl->it_index);
	liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
	debugfs_remove(liobn_entry);
}
#else
static void iommu_debugfs_add(struct iommu_table *tbl){}
static void iommu_debugfs_del(struct iommu_table *tbl){}
#endif

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 * with 4 pools all primary threads would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR_RW(fail_iommu);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif
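
/*
 * iommu_range_alloc() - find @npages free, contiguous TCE slots in the
 * table's allocation bitmap, honouring @mask, @align_order and the per-CPU
 * pool the calling CPU hashes to.  Returns the bitmap-relative starting
 * entry (callers add tbl->it_offset) or DMA_MAPPING_ERROR if no suitable
 * range could be found.
 */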
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_MAPPING_ERROR;
	}

	if (should_fail_iommu(dev))
		return DMA_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->large_pool.start : pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
			     align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
			     ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
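
/*
 * iommu_alloc() - allocate a range of TCE slots covering @npages starting at
 * @page and program them through tbl->it_ops->set().  Returns the resulting
 * bus address or DMA_MAPPING_ERROR; the bitmap is rolled back if the
 * hardware update fails.
 */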
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_MAPPING_ERROR))
		return DMA_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
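
/*
 * iommu_free_check() - sanity check a bus address/length pair against the
 * table bounds before freeing; logs the offending values and returns false
 * if the range does not belong to this table.
 */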
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}
static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}
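
/*
 * __iommu_free() - clear the TCE entries backing @dma_addr and release the
 * corresponding bits in the allocation bitmap.  The caller is responsible
 * for any TLB flush (see iommu_free()).
 */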
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
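
/*
 * ppc_iommu_map_sg() - map a scatterlist through the TCE table.  Segments
 * whose DMA addresses come out contiguous are merged into a single entry
 * (unless booted with iommu=novmerge or the merged length would exceed the
 * device's max segment size).  Returns the number of DMA segments produced,
 * or 0 on failure after undoing any partial mappings.
 */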
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
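
/*
 * iommu_table_clear() - prepare the table at init time: wipe stale firmware
 * entries on a normal boot, or preserve the first kernel's mappings (minus a
 * small reserve) when running as a kdump kernel.
 */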
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware-assisted dump, the system goes through a clean
	 * reboot at the time of the crash, so it is safe to clear the TCE
	 * entries if firmware-assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}
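
/*
 * iommu_table_reserve_pages() - mark TCE #0 (when the table starts at offset
 * zero) and the res_start..res_end window as in-use in the bitmap so the
 * allocator never hands them out.
 */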
static void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end)
{
	int i;

	WARN_ON_ONCE(res_end < res_start);
	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This stops buggy drivers that consider page 0 to be invalid
	 * from crashing the machine or even losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	tbl->it_reserved_start = res_start;
	tbl->it_reserved_end = res_end;

	/* Check if res_start..res_end isn't empty and overlaps the table */
	if (res_start && res_end &&
			(tbl->it_offset + tbl->it_size < res_start ||
			 res_end < tbl->it_offset))
		return;

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		set_bit(i - tbl->it_offset, tbl->it_map);
}
static void iommu_table_release_pages(struct iommu_table *tbl)
{
	int i;

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		clear_bit(i - tbl->it_offset, tbl->it_map);
}
/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
		unsigned long res_start, unsigned long res_end)
{
	unsigned long sz;
	static int welcomed = 0;
	unsigned int i;
	struct page *page;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	iommu_table_reserve_pages(tbl, res_start, res_end);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	iommu_debugfs_add(tbl);

	return tbl;
}
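
/*
 * iommu_table_free() - kref release callback: warn about any TCEs still
 * mapped, then free the allocation bitmap and the table itself.
 */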
static void iommu_table_free(struct kref *kref)
{
	unsigned long bitmap_sz;
	unsigned int order;
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	iommu_debugfs_del(tbl);

	iommu_table_release_pages(tbl);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}
struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (kref_get_unless_zero(&tbl->it_kref))
		return tbl;

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;

	if (tbl) {
		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      unsigned long attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}
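
/*
 * Typical use of the two helpers above from a dma_map_ops implementation
 * (sketch only; see dma_iommu_map_page()/dma_iommu_unmap_page()):
 *
 *	dma_addr_t addr = iommu_map_page(dev, get_iommu_table_base(dev), page,
 *					 offset, size, dma_get_mask(dev),
 *					 dir, attrs);
 *	if (addr == DMA_MAPPING_ERROR)
 *		return DMA_MAPPING_ERROR;
 *	...
 *	iommu_unmap_page(get_iommu_table_base(dev), addr, size, dir, attrs);
 */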
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, 0);
	if (mapping == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}
enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);
void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
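
/*
 * iommu_tce_check_ioba() - validate that a guest-supplied IO bus address is
 * page aligned and that the first page of the request falls inside the
 * offset..offset+size window of the table (all values in TCE pages).
 */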
int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	if ((ioba + 1) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;
	unsigned long size = 0;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)) &&
			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
					&size))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages, false);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);
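
/*
 * iommu_take_ownership() - switch the table to external (VFIO/KVM) control:
 * fail with -EBUSY if the kernel still has live mappings, otherwise mark the
 * whole bitmap as used so the kernel DMA API allocator stays out of the way.
 */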
int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires exchange() callback defined so if it is not
	 * implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->xchg_no_kill)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	iommu_table_release_pages(tbl);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
		ret = -EBUSY;
		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
				tbl->it_reserved_end);
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	memset(tbl->it_map, 0, sz);

	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
			tbl->it_reserved_end);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
	/*
	 * The sysfs entries should be populated before
	 * binding IOMMU group. If sysfs entries aren't
	 * ready, we simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (device_iommu_mapped(dev)) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev), iommu_group_id(table_group->group));

	return iommu_group_add_device(table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);
void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices might not have an IOMMU table and group
	 * and we needn't detach them from the associated
	 * IOMMU groups
	 */
	if (!device_iommu_mapped(dev)) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);
#endif /* CONFIG_IOMMU_API */