#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>

int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
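
/*
 * All mapping/allocation calls are dispatched through this ops table;
 * boot-time detection fills it in with the GART, Calgary, swiotlb or
 * nommu implementation.
 */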

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
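
/*
 * Mapping failures are reported by returning bad_dma_address; the
 * dma_mapping_error() helper of this era compares against this value
 * rather than against zero.
 */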

/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to older i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
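
/*
 * Typical driver usage (sketch; "pdev" is a hypothetical pci_dev):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -ENODEV;
 */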

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;

	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
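
/*
 * Example: booting with "dma32_size=64M" shrinks the reservation made
 * below to 64MB; memparse() understands the usual K/M/G suffixes.
 */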

void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	if (end_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * check aperture_64.c allocate_aperture() for reason about
	 * using 512M as goal
	 */
	align = 64ULL<<20;
	size = round_up(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
	if (end_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}

void __init pci_iommu_alloc(void)
{
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
#ifdef CONFIG_GART_IOMMU
	gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
	detect_calgary();
#endif

	detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}
#endif /* CONFIG_X86_64 */

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
		gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
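
/*
 * Options combine as a comma-separated list, e.g. "iommu=force,nomerge"
 * enables the IOMMU even on machines that would not otherwise use it
 * while keeping scatter-gather merging disabled.
 */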

#ifdef CONFIG_X86_32
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
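
/*
 * Usage sketch (addresses and size are made up for illustration):
 * carve out a dedicated 64K window at a fixed bus address so coherent
 * allocations for this device are satisfied from it:
 *
 *	if (!dma_declare_coherent_memory(dev, 0xfc000000, 0xfc000000,
 *					 0x10000, DMA_MEMORY_MAP))
 *		return -ENOMEM;
 */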

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

	pages >>= PAGE_SHIFT;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
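
/*
 * Convention for the helper below: a non-zero return means the device
 * has a declared memory area, and the caller returns *ret as-is
 * (possibly NULL) instead of falling back to the page allocator.
 */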

static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						     order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		} else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}

static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */
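
/*
 * SAC (single address cycle) keeps PCI bus addresses below 4GB; DAC
 * (dual address cycle) is the 64-bit variant, which some bridges
 * handle badly. That is what the forbid_dac logic below guards against.
 */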

int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
				 dev->bus_id);
		return 0;
	}
#endif

	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
				 dev->bus_id, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

/* Allocate DMA memory on node near device */
noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

	return alloc_pages_node(node, gfp, order);
}
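
/*
 * dma_alloc_coherent() below works in stages: any per-device declared
 * area is tried first, then the page allocator in a zone derived from
 * the coherent mask, and finally the dma_ops backend once the pages
 * land above what the device can address.
 */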

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory = NULL;
	struct page *page;
	unsigned long dma_mask = 0;
	dma_addr_t bus;
	int noretry = 0;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &fallback_dev;
		gfp |= GFP_DMA;
	}
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
	if (gfp & __GFP_DMA)
		noretry = 1;

#ifdef CONFIG_X86_64
	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
		gfp |= GFP_DMA32;
		if (dma_mask < DMA_32BIT_MASK)
			noretry = 1;
	}
#endif

 again:
	page = dma_alloc_pages(dev,
		noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
	if (page == NULL)
		return NULL;

	{
		int high, mmu;

		bus = page_to_phys(page);
		memory = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							   dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = bus;
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
					      size,
					      PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
		      (unsigned long)size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
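
/*
 * Usage sketch ("pdev", "ring_bus" and RING_BYTES are hypothetical):
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&ring_bus, GFP_KERNEL);
 *
 * Pair every allocation with dma_free_coherent(), defined next.
 */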

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);

	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);

static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
	calgary_iommu_init();
#endif

	intel_iommu_init();

#ifdef CONFIG_GART_IOMMU
	gart_iommu_init();
#endif

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected. "
				 "Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif