2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/timer.h>
36 #include <linux/iova.h>
37 #include <linux/iommu.h>
38 #include <linux/intel-iommu.h>
39 #include <linux/syscore_ops.h>
40 #include <linux/tboot.h>
41 #include <linux/dmi.h>
42 #include <linux/pci-ats.h>
43 #include <asm/cacheflush.h>
44 #include <asm/iommu.h>
46 #define ROOT_SIZE VTD_PAGE_SIZE
47 #define CONTEXT_SIZE VTD_PAGE_SIZE
49 #define IS_BRIDGE_HOST_DEVICE(pdev) \
50 ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
51 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
52 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
53 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
55 #define IOAPIC_RANGE_START (0xfee00000)
56 #define IOAPIC_RANGE_END (0xfeefffff)
57 #define IOVA_START_ADDR (0x1000)
59 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
61 #define MAX_AGAW_WIDTH 64
63 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
64 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
66 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
67 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
68 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
69 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
70 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
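/*
 * Worked example (illustration only, not part of the driver): with the
 * default 48-bit guest address width and VTD_PAGE_SHIFT == 12,
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1, the last 4KiB page frame of a
 * 48-bit DMA address space, and DOMAIN_MAX_ADDR(48) is that PFN shifted
 * back into a byte address. On 64-bit builds the min_t() clamp against
 * (unsigned long)-1 is a no-op; on 32-bit builds it keeps the PFN
 * representable in an unsigned long.
 */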
72 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
73 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
74 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
76 /* page table handling */
77 #define LEVEL_STRIDE (9)
78 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
80 static inline int agaw_to_level(int agaw)
85 static inline int agaw_to_width(int agaw)
87 return 30 + agaw * LEVEL_STRIDE;
90 static inline int width_to_agaw(int width)
92 return (width - 30) / LEVEL_STRIDE;
95 static inline unsigned int level_to_offset_bits(int level)
97 return (level - 1) * LEVEL_STRIDE;
100 static inline int pfn_level_offset(unsigned long pfn, int level)
102 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
105 static inline unsigned long level_mask(int level)
107 return -1UL << level_to_offset_bits(level);
110 static inline unsigned long level_size(int level)
112 return 1UL << level_to_offset_bits(level);
115 static inline unsigned long align_to_level(unsigned long pfn, int level)
117 return (pfn + level_size(level) - 1) & level_mask(level);
120 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
122 return 1 << ((lvl - 1) * LEVEL_STRIDE);
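/*
 * Worked example of the level arithmetic above (illustration only):
 * with LEVEL_STRIDE == 9 and VTD_PAGE_SHIFT == 12, a level-1 PTE maps
 * one 4KiB page, a level-2 PTE maps lvl_to_nr_pages(2) == 512 pages
 * (2MiB) and a level-3 PTE maps 512 * 512 pages (1GiB). Each level
 * resolves 9 bits of the PFN, so a 48-bit address width
 * (width_to_agaw(48) == 2) needs a 4-level table: 48 == 12 + 4 * 9.
 */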
125 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
126 are never going to work. */
127 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
129 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
132 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
134 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
136 static inline unsigned long page_to_dma_pfn(struct page *pg)
138 return mm_to_dma_pfn(page_to_pfn(pg));
140 static inline unsigned long virt_to_dma_pfn(void *p)
142 return page_to_dma_pfn(virt_to_page(p));
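/*
 * Example (illustration only): on x86 with 4KiB MM pages, PAGE_SHIFT ==
 * VTD_PAGE_SHIFT == 12 and dma_to_mm_pfn()/mm_to_dma_pfn() are identity
 * conversions. On an architecture with 64KiB MM pages they would shift
 * by 4, i.e. one MM page corresponds to 16 VT-d pages.
 */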
145 /* global iommu list, set NULL for ignored DMAR units */
146 static struct intel_iommu **g_iommus;
148 static void __init check_tylersburg_isoch(void);
149 static int rwbf_quirk;
152 * set to 1 to panic the kernel if VT-d can't be enabled successfully
153 * (used when kernel is launched w/ TXT)
155 static int force_on = 0;
160 * 12-63: Context Ptr (12 - (haw-1))
167 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
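/*
 * For reference (illustration only): struct root_entry is two u64s
 * (16 bytes), so ROOT_ENTRY_NR == 4096 / 16 == 256, one root entry per
 * possible PCI bus number; each present entry points at a 4KiB context
 * table holding one context_entry per devfn on that bus.
 */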
168 static inline bool root_present(struct root_entry *root)
170 return (root->val & 1);
172 static inline void set_root_present(struct root_entry *root)
176 static inline void set_root_value(struct root_entry *root, unsigned long value)
178 root->val |= value & VTD_PAGE_MASK;
181 static inline struct context_entry *
182 get_context_addr_from_root(struct root_entry *root)
184 return (struct context_entry *)
185 (root_present(root)?phys_to_virt(
186 root->val & VTD_PAGE_MASK) :
193 * 1: fault processing disable
194 * 2-3: translation type
195 * 12-63: address space root
201 struct context_entry {
206 static inline bool context_present(struct context_entry *context)
208 return (context->lo & 1);
210 static inline void context_set_present(struct context_entry *context)
215 static inline void context_set_fault_enable(struct context_entry *context)
217 context->lo &= (((u64)-1) << 2) | 1;
220 static inline void context_set_translation_type(struct context_entry *context,
223 context->lo &= (((u64)-1) << 4) | 3;
224 context->lo |= (value & 3) << 2;
227 static inline void context_set_address_root(struct context_entry *context,
230 context->lo |= value & VTD_PAGE_MASK;
233 static inline void context_set_address_width(struct context_entry *context,
236 context->hi |= value & 7;
239 static inline void context_set_domain_id(struct context_entry *context,
242 context->hi |= (value & ((1 << 16) - 1)) << 8;
245 static inline void context_clear_entry(struct context_entry *context)
258 * 12-63: Host physical address
264 static inline void dma_clear_pte(struct dma_pte *pte)
269 static inline void dma_set_pte_readable(struct dma_pte *pte)
271 pte->val |= DMA_PTE_READ;
274 static inline void dma_set_pte_writable(struct dma_pte *pte)
276 pte->val |= DMA_PTE_WRITE;
279 static inline void dma_set_pte_snp(struct dma_pte *pte)
281 pte->val |= DMA_PTE_SNP;
284 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
286 pte->val = (pte->val & ~3) | (prot & 3);
289 static inline u64 dma_pte_addr(struct dma_pte *pte)
292 return pte->val & VTD_PAGE_MASK;
294 /* Must have a full atomic 64-bit read */
295 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
299 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
301 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
304 static inline bool dma_pte_present(struct dma_pte *pte)
306 return (pte->val & 3) != 0;
309 static inline int first_pte_in_page(struct dma_pte *pte)
311 return !((unsigned long)pte & ~VTD_PAGE_MASK);
315 * This domain is a statically identity mapping domain.
316 * 1. This domain creates a static 1:1 mapping to all usable memory.
317 * 2. It maps to each iommu if successful.
318 * 3. Each iommu maps to this domain if successful.
320 static struct dmar_domain *si_domain;
321 static int hw_pass_through = 1;
323 /* devices under the same p2p bridge are owned in one domain */
324 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
326 /* domain represents a virtual machine; more than one device
327 * across iommus may be owned by one domain, e.g. a kvm guest.
329 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
331 /* si_domain contains multiple devices */
332 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
335 int id; /* domain id */
336 int nid; /* node id */
337 unsigned long iommu_bmp; /* bitmap of iommus this domain uses */
339 struct list_head devices; /* all devices' list */
340 struct iova_domain iovad; /* iova's that belong to this domain */
342 struct dma_pte *pgd; /* virtual address */
343 int gaw; /* max guest address width */
345 /* adjusted guest address width, 0 is level 2 30-bit */
348 int flags; /* flags to find out type of domain */
350 int iommu_coherency;/* indicate coherency of iommu access */
351 int iommu_snooping; /* indicate snooping control feature*/
352 int iommu_count; /* reference count of iommu */
353 int iommu_superpage;/* Level of superpages supported:
354 0 == 4KiB (no superpages), 1 == 2MiB,
355 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
356 spinlock_t iommu_lock; /* protect iommu set in domain */
357 u64 max_addr; /* maximum mapped address */
360 /* PCI domain-device relationship */
361 struct device_domain_info {
362 struct list_head link; /* link to domain siblings */
363 struct list_head global; /* link to global list */
364 int segment; /* PCI domain */
365 u8 bus; /* PCI bus number */
366 u8 devfn; /* PCI devfn number */
367 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
368 struct intel_iommu *iommu; /* IOMMU used by this device */
369 struct dmar_domain *domain; /* pointer to domain */
372 static void flush_unmaps_timeout(unsigned long data);
374 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
376 #define HIGH_WATER_MARK 250
377 struct deferred_flush_tables {
379 struct iova *iova[HIGH_WATER_MARK];
380 struct dmar_domain *domain[HIGH_WATER_MARK];
383 static struct deferred_flush_tables *deferred_flush;
385 /* bitmap for indexing intel_iommus */
386 static int g_num_of_iommus;
388 static DEFINE_SPINLOCK(async_umap_flush_lock);
389 static LIST_HEAD(unmaps_to_do);
392 static long list_size;
394 static void domain_remove_dev_info(struct dmar_domain *domain);
396 #ifdef CONFIG_DMAR_DEFAULT_ON
397 int dmar_disabled = 0;
399 int dmar_disabled = 1;
400 #endif /*CONFIG_DMAR_DEFAULT_ON*/
402 static int dmar_map_gfx = 1;
403 static int dmar_forcedac;
404 static int intel_iommu_strict;
405 static int intel_iommu_superpage = 1;
407 int intel_iommu_gfx_mapped;
408 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
410 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
411 static DEFINE_SPINLOCK(device_domain_lock);
412 static LIST_HEAD(device_domain_list);
414 static struct iommu_ops intel_iommu_ops;
416 static int __init intel_iommu_setup(char *str)
421 if (!strncmp(str, "on", 2)) {
423 printk(KERN_INFO "Intel-IOMMU: enabled\n");
424 } else if (!strncmp(str, "off", 3)) {
426 printk(KERN_INFO "Intel-IOMMU: disabled\n");
427 } else if (!strncmp(str, "igfx_off", 8)) {
430 "Intel-IOMMU: disable GFX device mapping\n");
431 } else if (!strncmp(str, "forcedac", 8)) {
433 "Intel-IOMMU: Forcing DAC for PCI devices\n");
435 } else if (!strncmp(str, "strict", 6)) {
437 "Intel-IOMMU: disable batched IOTLB flush\n");
438 intel_iommu_strict = 1;
439 } else if (!strncmp(str, "sp_off", 6)) {
441 "Intel-IOMMU: disable supported super page\n");
442 intel_iommu_superpage = 0;
445 str += strcspn(str, ",");
451 __setup("intel_iommu=", intel_iommu_setup);
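/*
 * Example usage (illustration only): the options parsed above are
 * comma-separated on the kernel command line, e.g.
 *
 *	intel_iommu=on,strict,igfx_off
 *
 * which enables the IOMMU, disables batched IOTLB flushing and leaves
 * the integrated graphics device unmapped.
 */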
453 static struct kmem_cache *iommu_domain_cache;
454 static struct kmem_cache *iommu_devinfo_cache;
455 static struct kmem_cache *iommu_iova_cache;
457 static inline void *alloc_pgtable_page(int node)
462 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
464 vaddr = page_address(page);
468 static inline void free_pgtable_page(void *vaddr)
470 free_page((unsigned long)vaddr);
473 static inline void *alloc_domain_mem(void)
475 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
478 static void free_domain_mem(void *vaddr)
480 kmem_cache_free(iommu_domain_cache, vaddr);
483 static inline void * alloc_devinfo_mem(void)
485 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
488 static inline void free_devinfo_mem(void *vaddr)
490 kmem_cache_free(iommu_devinfo_cache, vaddr);
493 struct iova *alloc_iova_mem(void)
495 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
498 void free_iova_mem(struct iova *iova)
500 kmem_cache_free(iommu_iova_cache, iova);
504 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
509 sagaw = cap_sagaw(iommu->cap);
510 for (agaw = width_to_agaw(max_gaw);
512 if (test_bit(agaw, &sagaw))
520 * Calculate max SAGAW for each iommu.
522 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
524 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
528 * calculate agaw for each iommu.
529 * "SAGAW" may be different across iommus, use a default agaw, and
530 * fall back to a smaller supported agaw for iommus that don't support the default agaw.
532 int iommu_calculate_agaw(struct intel_iommu *iommu)
534 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
537 /* This function only returns a single iommu in a domain */
538 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
542 /* si_domain and vm domain should not get here. */
543 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
544 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
546 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
547 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
550 return g_iommus[iommu_id];
553 static void domain_update_iommu_coherency(struct dmar_domain *domain)
557 domain->iommu_coherency = 1;
559 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
560 if (!ecap_coherent(g_iommus[i]->ecap)) {
561 domain->iommu_coherency = 0;
567 static void domain_update_iommu_snooping(struct dmar_domain *domain)
571 domain->iommu_snooping = 1;
573 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
574 if (!ecap_sc_support(g_iommus[i]->ecap)) {
575 domain->iommu_snooping = 0;
581 static void domain_update_iommu_superpage(struct dmar_domain *domain)
585 if (!intel_iommu_superpage) {
586 domain->iommu_superpage = 0;
590 domain->iommu_superpage = 4; /* 1TiB */
592 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
593 mask |= cap_super_page_val(g_iommus[i]->cap);
598 domain->iommu_superpage = fls(mask);
601 /* Some capabilities may be different across iommus */
602 static void domain_update_iommu_cap(struct dmar_domain *domain)
604 domain_update_iommu_coherency(domain);
605 domain_update_iommu_snooping(domain);
606 domain_update_iommu_superpage(domain);
609 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
611 struct dmar_drhd_unit *drhd = NULL;
614 for_each_drhd_unit(drhd) {
617 if (segment != drhd->segment)
620 for (i = 0; i < drhd->devices_cnt; i++) {
621 if (drhd->devices[i] &&
622 drhd->devices[i]->bus->number == bus &&
623 drhd->devices[i]->devfn == devfn)
625 if (drhd->devices[i] &&
626 drhd->devices[i]->subordinate &&
627 drhd->devices[i]->subordinate->number <= bus &&
628 drhd->devices[i]->subordinate->subordinate >= bus)
632 if (drhd->include_all)
639 static void domain_flush_cache(struct dmar_domain *domain,
640 void *addr, int size)
642 if (!domain->iommu_coherency)
643 clflush_cache_range(addr, size);
646 /* Gets context entry for a given bus and devfn */
647 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
650 struct root_entry *root;
651 struct context_entry *context;
652 unsigned long phy_addr;
655 spin_lock_irqsave(&iommu->lock, flags);
656 root = &iommu->root_entry[bus];
657 context = get_context_addr_from_root(root);
659 context = (struct context_entry *)
660 alloc_pgtable_page(iommu->node);
662 spin_unlock_irqrestore(&iommu->lock, flags);
665 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
666 phy_addr = virt_to_phys((void *)context);
667 set_root_value(root, phy_addr);
668 set_root_present(root);
669 __iommu_flush_cache(iommu, root, sizeof(*root));
671 spin_unlock_irqrestore(&iommu->lock, flags);
672 return &context[devfn];
675 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
677 struct root_entry *root;
678 struct context_entry *context;
682 spin_lock_irqsave(&iommu->lock, flags);
683 root = &iommu->root_entry[bus];
684 context = get_context_addr_from_root(root);
689 ret = context_present(&context[devfn]);
691 spin_unlock_irqrestore(&iommu->lock, flags);
695 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
697 struct root_entry *root;
698 struct context_entry *context;
701 spin_lock_irqsave(&iommu->lock, flags);
702 root = &iommu->root_entry[bus];
703 context = get_context_addr_from_root(root);
705 context_clear_entry(&context[devfn]);
706 __iommu_flush_cache(iommu, &context[devfn], \
709 spin_unlock_irqrestore(&iommu->lock, flags);
712 static void free_context_table(struct intel_iommu *iommu)
714 struct root_entry *root;
717 struct context_entry *context;
719 spin_lock_irqsave(&iommu->lock, flags);
720 if (!iommu->root_entry) {
723 for (i = 0; i < ROOT_ENTRY_NR; i++) {
724 root = &iommu->root_entry[i];
725 context = get_context_addr_from_root(root);
727 free_pgtable_page(context);
729 free_pgtable_page(iommu->root_entry);
730 iommu->root_entry = NULL;
732 spin_unlock_irqrestore(&iommu->lock, flags);
735 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
736 unsigned long pfn, int large_level)
738 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
739 struct dma_pte *parent, *pte = NULL;
740 int level = agaw_to_level(domain->agaw);
741 int offset, target_level;
743 BUG_ON(!domain->pgd);
744 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
745 parent = domain->pgd;
751 target_level = large_level;
756 offset = pfn_level_offset(pfn, level);
757 pte = &parent[offset];
758 if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
760 if (level == target_level)
763 if (!dma_pte_present(pte)) {
766 tmp_page = alloc_pgtable_page(domain->nid);
771 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
772 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
773 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
774 /* Someone else set it while we were thinking; use theirs. */
775 free_pgtable_page(tmp_page);
778 domain_flush_cache(domain, pte, sizeof(*pte));
781 parent = phys_to_virt(dma_pte_addr(pte));
789 /* return the address's pte at a specific level */
790 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
792 int level, int *large_page)
794 struct dma_pte *parent, *pte = NULL;
795 int total = agaw_to_level(domain->agaw);
798 parent = domain->pgd;
799 while (level <= total) {
800 offset = pfn_level_offset(pfn, total);
801 pte = &parent[offset];
805 if (!dma_pte_present(pte)) {
810 if (pte->val & DMA_PTE_LARGE_PAGE) {
815 parent = phys_to_virt(dma_pte_addr(pte));
821 /* clear last level pte, a tlb flush should follow */
822 static void dma_pte_clear_range(struct dmar_domain *domain,
823 unsigned long start_pfn,
824 unsigned long last_pfn)
826 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
827 unsigned int large_page = 1;
828 struct dma_pte *first_pte, *pte;
830 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
831 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
832 BUG_ON(start_pfn > last_pfn);
834 /* we don't need lock here; nobody else touches the iova range */
837 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
839 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
844 start_pfn += lvl_to_nr_pages(large_page);
846 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
848 domain_flush_cache(domain, first_pte,
849 (void *)pte - (void *)first_pte);
851 } while (start_pfn && start_pfn <= last_pfn);
854 /* free page table pages. last level pte should already be cleared */
855 static void dma_pte_free_pagetable(struct dmar_domain *domain,
856 unsigned long start_pfn,
857 unsigned long last_pfn)
859 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
860 struct dma_pte *first_pte, *pte;
861 int total = agaw_to_level(domain->agaw);
866 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
867 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
868 BUG_ON(start_pfn > last_pfn);
870 /* We don't need lock here; nobody else touches the iova range */
872 while (level <= total) {
873 tmp = align_to_level(start_pfn, level);
875 /* If we can't even clear one PTE at this level, we're done */
876 if (tmp + level_size(level) - 1 > last_pfn)
881 first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
882 if (large_page > level)
883 level = large_page + 1;
885 tmp = align_to_level(tmp + 1, level + 1);
889 if (dma_pte_present(pte)) {
890 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
894 tmp += level_size(level);
895 } while (!first_pte_in_page(pte) &&
896 tmp + level_size(level) - 1 <= last_pfn);
898 domain_flush_cache(domain, first_pte,
899 (void *)pte - (void *)first_pte);
901 } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
905 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
906 free_pgtable_page(domain->pgd);
912 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
914 struct root_entry *root;
917 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
921 __iommu_flush_cache(iommu, root, ROOT_SIZE);
923 spin_lock_irqsave(&iommu->lock, flags);
924 iommu->root_entry = root;
925 spin_unlock_irqrestore(&iommu->lock, flags);
930 static void iommu_set_root_entry(struct intel_iommu *iommu)
936 addr = iommu->root_entry;
938 spin_lock_irqsave(&iommu->register_lock, flag);
939 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
941 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
943 /* Make sure hardware complete it */
944 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
945 readl, (sts & DMA_GSTS_RTPS), sts);
947 spin_unlock_irqrestore(&iommu->register_lock, flag);
950 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
955 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
958 spin_lock_irqsave(&iommu->register_lock, flag);
959 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
961 /* Make sure hardware complete it */
962 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
963 readl, (!(val & DMA_GSTS_WBFS)), val);
965 spin_unlock_irqrestore(&iommu->register_lock, flag);
968 /* return value determines if we need a write buffer flush */
969 static void __iommu_flush_context(struct intel_iommu *iommu,
970 u16 did, u16 source_id, u8 function_mask,
977 case DMA_CCMD_GLOBAL_INVL:
978 val = DMA_CCMD_GLOBAL_INVL;
980 case DMA_CCMD_DOMAIN_INVL:
981 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
983 case DMA_CCMD_DEVICE_INVL:
984 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
985 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
992 spin_lock_irqsave(&iommu->register_lock, flag);
993 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
995 /* Make sure hardware complete it */
996 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
997 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
999 spin_unlock_irqrestore(&iommu->register_lock, flag);
1002 /* return value determines if we need a write buffer flush */
1003 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1004 u64 addr, unsigned int size_order, u64 type)
1006 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1007 u64 val = 0, val_iva = 0;
1011 case DMA_TLB_GLOBAL_FLUSH:
1012 /* global flush doesn't need set IVA_REG */
1013 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1015 case DMA_TLB_DSI_FLUSH:
1016 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1018 case DMA_TLB_PSI_FLUSH:
1019 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1020 /* Note: always flush non-leaf currently */
1021 val_iva = size_order | addr;
1026 /* Note: set drain read/write */
1029 * This is probably only needed to be extra safe. It looks like we
1030 * can ignore it without any impact.
1032 if (cap_read_drain(iommu->cap))
1033 val |= DMA_TLB_READ_DRAIN;
1035 if (cap_write_drain(iommu->cap))
1036 val |= DMA_TLB_WRITE_DRAIN;
1038 spin_lock_irqsave(&iommu->register_lock, flag);
1039 /* Note: Only uses first TLB reg currently */
1041 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1042 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1044 /* Make sure hardware complete it */
1045 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1046 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1048 spin_unlock_irqrestore(&iommu->register_lock, flag);
1050 /* check IOTLB invalidation granularity */
1051 if (DMA_TLB_IAIG(val) == 0)
1052 printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
1053 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1054 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1055 (unsigned long long)DMA_TLB_IIRG(type),
1056 (unsigned long long)DMA_TLB_IAIG(val));
1059 static struct device_domain_info *iommu_support_dev_iotlb(
1060 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1063 unsigned long flags;
1064 struct device_domain_info *info;
1065 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1067 if (!ecap_dev_iotlb_support(iommu->ecap))
1073 spin_lock_irqsave(&device_domain_lock, flags);
1074 list_for_each_entry(info, &domain->devices, link)
1075 if (info->bus == bus && info->devfn == devfn) {
1079 spin_unlock_irqrestore(&device_domain_lock, flags);
1081 if (!found || !info->dev)
1084 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1087 if (!dmar_find_matched_atsr_unit(info->dev))
1090 info->iommu = iommu;
1095 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1100 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1103 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1105 if (!info->dev || !pci_ats_enabled(info->dev))
1108 pci_disable_ats(info->dev);
1111 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1112 u64 addr, unsigned mask)
1115 unsigned long flags;
1116 struct device_domain_info *info;
1118 spin_lock_irqsave(&device_domain_lock, flags);
1119 list_for_each_entry(info, &domain->devices, link) {
1120 if (!info->dev || !pci_ats_enabled(info->dev))
1123 sid = info->bus << 8 | info->devfn;
1124 qdep = pci_ats_queue_depth(info->dev);
1125 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1127 spin_unlock_irqrestore(&device_domain_lock, flags);
1130 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1131 unsigned long pfn, unsigned int pages, int map)
1133 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1134 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1139 * Fall back to domain selective flush if there is no PSI support or the size is too big.
1141 * PSI requires page size to be 2 ^ x, and the base address is naturally
1142 * aligned to the size
1144 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1145 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1148 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1152 * In caching mode, changes of pages from non-present to present require
1153 * flush. However, device IOTLB doesn't need to be flushed in this case.
1155 if (!cap_caching_mode(iommu->cap) || !map)
1156 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
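/*
 * Worked example (illustration only): for a 5-page range the code above
 * computes mask = ilog2(__roundup_pow_of_two(5)) == 3, so the PSI covers
 * 8 pages and the address must be 8-page aligned; if the IOMMU cannot
 * invalidate that many pages at once (mask > cap_max_amask_val) or lacks
 * page-selective invalidation, it falls back to a domain-selective flush.
 */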
1159 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1162 unsigned long flags;
1164 spin_lock_irqsave(&iommu->register_lock, flags);
1165 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1166 pmen &= ~DMA_PMEN_EPM;
1167 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1169 /* wait for the protected region status bit to clear */
1170 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1171 readl, !(pmen & DMA_PMEN_PRS), pmen);
1173 spin_unlock_irqrestore(&iommu->register_lock, flags);
1176 static int iommu_enable_translation(struct intel_iommu *iommu)
1179 unsigned long flags;
1181 spin_lock_irqsave(&iommu->register_lock, flags);
1182 iommu->gcmd |= DMA_GCMD_TE;
1183 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1185 /* Make sure hardware complete it */
1186 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1187 readl, (sts & DMA_GSTS_TES), sts);
1189 spin_unlock_irqrestore(&iommu->register_lock, flags);
1193 static int iommu_disable_translation(struct intel_iommu *iommu)
1198 spin_lock_irqsave(&iommu->register_lock, flag);
1199 iommu->gcmd &= ~DMA_GCMD_TE;
1200 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1202 /* Make sure hardware complete it */
1203 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1204 readl, (!(sts & DMA_GSTS_TES)), sts);
1206 spin_unlock_irqrestore(&iommu->register_lock, flag);
1211 static int iommu_init_domains(struct intel_iommu *iommu)
1213 unsigned long ndomains;
1214 unsigned long nlongs;
1216 ndomains = cap_ndoms(iommu->cap);
1217 pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id,
1219 nlongs = BITS_TO_LONGS(ndomains);
1221 spin_lock_init(&iommu->lock);
1223 /* TBD: there might be 64K domains,
1224 * consider other allocation for future chip
1226 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1227 if (!iommu->domain_ids) {
1228 printk(KERN_ERR "Allocating domain id array failed\n");
1231 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1233 if (!iommu->domains) {
1234 printk(KERN_ERR "Allocating domain array failed\n");
1239 * if Caching mode is set, then invalid translations are tagged
1240 * with domainid 0. Hence we need to pre-allocate it.
1242 if (cap_caching_mode(iommu->cap))
1243 set_bit(0, iommu->domain_ids);
1248 static void domain_exit(struct dmar_domain *domain);
1249 static void vm_domain_exit(struct dmar_domain *domain);
1251 void free_dmar_iommu(struct intel_iommu *iommu)
1253 struct dmar_domain *domain;
1255 unsigned long flags;
1257 if ((iommu->domains) && (iommu->domain_ids)) {
1258 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1259 domain = iommu->domains[i];
1260 clear_bit(i, iommu->domain_ids);
1262 spin_lock_irqsave(&domain->iommu_lock, flags);
1263 if (--domain->iommu_count == 0) {
1264 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1265 vm_domain_exit(domain);
1267 domain_exit(domain);
1269 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1273 if (iommu->gcmd & DMA_GCMD_TE)
1274 iommu_disable_translation(iommu);
1277 irq_set_handler_data(iommu->irq, NULL);
1278 /* This will mask the irq */
1279 free_irq(iommu->irq, iommu);
1280 destroy_irq(iommu->irq);
1283 kfree(iommu->domains);
1284 kfree(iommu->domain_ids);
1286 g_iommus[iommu->seq_id] = NULL;
1288 /* if all iommus are freed, free g_iommus */
1289 for (i = 0; i < g_num_of_iommus; i++) {
1294 if (i == g_num_of_iommus)
1297 /* free context mapping */
1298 free_context_table(iommu);
1301 static struct dmar_domain *alloc_domain(void)
1303 struct dmar_domain *domain;
1305 domain = alloc_domain_mem();
1310 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1316 static int iommu_attach_domain(struct dmar_domain *domain,
1317 struct intel_iommu *iommu)
1320 unsigned long ndomains;
1321 unsigned long flags;
1323 ndomains = cap_ndoms(iommu->cap);
1325 spin_lock_irqsave(&iommu->lock, flags);
1327 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1328 if (num >= ndomains) {
1329 spin_unlock_irqrestore(&iommu->lock, flags);
1330 printk(KERN_ERR "IOMMU: no free domain ids\n");
1335 set_bit(num, iommu->domain_ids);
1336 set_bit(iommu->seq_id, &domain->iommu_bmp);
1337 iommu->domains[num] = domain;
1338 spin_unlock_irqrestore(&iommu->lock, flags);
1343 static void iommu_detach_domain(struct dmar_domain *domain,
1344 struct intel_iommu *iommu)
1346 unsigned long flags;
1350 spin_lock_irqsave(&iommu->lock, flags);
1351 ndomains = cap_ndoms(iommu->cap);
1352 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1353 if (iommu->domains[num] == domain) {
1360 clear_bit(num, iommu->domain_ids);
1361 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1362 iommu->domains[num] = NULL;
1364 spin_unlock_irqrestore(&iommu->lock, flags);
1367 static struct iova_domain reserved_iova_list;
1368 static struct lock_class_key reserved_rbtree_key;
1370 static int dmar_init_reserved_ranges(void)
1372 struct pci_dev *pdev = NULL;
1376 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1378 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1379 &reserved_rbtree_key);
1381 /* IOAPIC ranges shouldn't be accessed by DMA */
1382 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1383 IOVA_PFN(IOAPIC_RANGE_END));
1385 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1389 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1390 for_each_pci_dev(pdev) {
1393 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1394 r = &pdev->resource[i];
1395 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1397 iova = reserve_iova(&reserved_iova_list,
1401 printk(KERN_ERR "Reserve iova failed\n");
1409 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1411 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1414 static inline int guestwidth_to_adjustwidth(int gaw)
1417 int r = (gaw - 12) % 9;
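/*
 * Worked example (illustration only): the adjusted width rounds the
 * guest address width up to the next value the page-table levels can
 * express, i.e. 12 plus a multiple of 9. gaw == 48 gives r == 0 and is
 * kept as-is; gaw == 40 gives r == 1 and is rounded up to 48.
 */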
1428 static int domain_init(struct dmar_domain *domain, int guest_width)
1430 struct intel_iommu *iommu;
1431 int adjust_width, agaw;
1432 unsigned long sagaw;
1434 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1435 spin_lock_init(&domain->iommu_lock);
1437 domain_reserve_special_ranges(domain);
1439 /* calculate AGAW */
1440 iommu = domain_get_iommu(domain);
1441 if (guest_width > cap_mgaw(iommu->cap))
1442 guest_width = cap_mgaw(iommu->cap);
1443 domain->gaw = guest_width;
1444 adjust_width = guestwidth_to_adjustwidth(guest_width);
1445 agaw = width_to_agaw(adjust_width);
1446 sagaw = cap_sagaw(iommu->cap);
1447 if (!test_bit(agaw, &sagaw)) {
1448 /* hardware doesn't support it, choose a bigger one */
1449 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1450 agaw = find_next_bit(&sagaw, 5, agaw);
1454 domain->agaw = agaw;
1455 INIT_LIST_HEAD(&domain->devices);
1457 if (ecap_coherent(iommu->ecap))
1458 domain->iommu_coherency = 1;
1460 domain->iommu_coherency = 0;
1462 if (ecap_sc_support(iommu->ecap))
1463 domain->iommu_snooping = 1;
1465 domain->iommu_snooping = 0;
1467 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1468 domain->iommu_count = 1;
1469 domain->nid = iommu->node;
1471 /* always allocate the top pgd */
1472 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1475 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1479 static void domain_exit(struct dmar_domain *domain)
1481 struct dmar_drhd_unit *drhd;
1482 struct intel_iommu *iommu;
1484 /* Domain 0 is reserved, so don't process it */
1488 /* Flush any lazy unmaps that may reference this domain */
1489 if (!intel_iommu_strict)
1490 flush_unmaps_timeout(0);
1492 domain_remove_dev_info(domain);
1494 put_iova_domain(&domain->iovad);
1497 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1499 /* free page tables */
1500 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1502 for_each_active_iommu(iommu, drhd)
1503 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1504 iommu_detach_domain(domain, iommu);
1506 free_domain_mem(domain);
1509 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1510 u8 bus, u8 devfn, int translation)
1512 struct context_entry *context;
1513 unsigned long flags;
1514 struct intel_iommu *iommu;
1515 struct dma_pte *pgd;
1517 unsigned long ndomains;
1520 struct device_domain_info *info = NULL;
1522 pr_debug("Set context mapping for %02x:%02x.%d\n",
1523 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1525 BUG_ON(!domain->pgd);
1526 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1527 translation != CONTEXT_TT_MULTI_LEVEL);
1529 iommu = device_to_iommu(segment, bus, devfn);
1533 context = device_to_context_entry(iommu, bus, devfn);
1536 spin_lock_irqsave(&iommu->lock, flags);
1537 if (context_present(context)) {
1538 spin_unlock_irqrestore(&iommu->lock, flags);
1545 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1546 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1549 /* find an available domain id for this device in iommu */
1550 ndomains = cap_ndoms(iommu->cap);
1551 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1552 if (iommu->domains[num] == domain) {
1560 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1561 if (num >= ndomains) {
1562 spin_unlock_irqrestore(&iommu->lock, flags);
1563 printk(KERN_ERR "IOMMU: no free domain ids\n");
1567 set_bit(num, iommu->domain_ids);
1568 iommu->domains[num] = domain;
1572 /* Skip top levels of page tables for
1573 * iommu which has less agaw than default.
1574 * Unnecessary for PT mode.
1576 if (translation != CONTEXT_TT_PASS_THROUGH) {
1577 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1578 pgd = phys_to_virt(dma_pte_addr(pgd));
1579 if (!dma_pte_present(pgd)) {
1580 spin_unlock_irqrestore(&iommu->lock, flags);
1587 context_set_domain_id(context, id);
1589 if (translation != CONTEXT_TT_PASS_THROUGH) {
1590 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1591 translation = info ? CONTEXT_TT_DEV_IOTLB :
1592 CONTEXT_TT_MULTI_LEVEL;
1595 * In pass through mode, AW must be programmed to indicate the largest
1596 * AGAW value supported by hardware. And ASR is ignored by hardware.
1598 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1599 context_set_address_width(context, iommu->msagaw);
1601 context_set_address_root(context, virt_to_phys(pgd));
1602 context_set_address_width(context, iommu->agaw);
1605 context_set_translation_type(context, translation);
1606 context_set_fault_enable(context);
1607 context_set_present(context);
1608 domain_flush_cache(domain, context, sizeof(*context));
1611 * It's a non-present to present mapping. If hardware doesn't cache
1612 * non-present entries we only need to flush the write-buffer. If it
1613 * _does_ cache non-present entries, then it does so in the special
1614 * domain #0, which we have to flush:
1616 if (cap_caching_mode(iommu->cap)) {
1617 iommu->flush.flush_context(iommu, 0,
1618 (((u16)bus) << 8) | devfn,
1619 DMA_CCMD_MASK_NOBIT,
1620 DMA_CCMD_DEVICE_INVL);
1621 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1623 iommu_flush_write_buffer(iommu);
1625 iommu_enable_dev_iotlb(info);
1626 spin_unlock_irqrestore(&iommu->lock, flags);
1628 spin_lock_irqsave(&domain->iommu_lock, flags);
1629 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1630 domain->iommu_count++;
1631 if (domain->iommu_count == 1)
1632 domain->nid = iommu->node;
1633 domain_update_iommu_cap(domain);
1635 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1640 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1644 struct pci_dev *tmp, *parent;
1646 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1647 pdev->bus->number, pdev->devfn,
1652 /* dependent device mapping */
1653 tmp = pci_find_upstream_pcie_bridge(pdev);
1656 /* Secondary interface's bus number and devfn 0 */
1657 parent = pdev->bus->self;
1658 while (parent != tmp) {
1659 ret = domain_context_mapping_one(domain,
1660 pci_domain_nr(parent->bus),
1661 parent->bus->number,
1662 parent->devfn, translation);
1665 parent = parent->bus->self;
1667 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1668 return domain_context_mapping_one(domain,
1669 pci_domain_nr(tmp->subordinate),
1670 tmp->subordinate->number, 0,
1672 else /* this is a legacy PCI bridge */
1673 return domain_context_mapping_one(domain,
1674 pci_domain_nr(tmp->bus),
1680 static int domain_context_mapped(struct pci_dev *pdev)
1683 struct pci_dev *tmp, *parent;
1684 struct intel_iommu *iommu;
1686 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1691 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1694 /* dependent device mapping */
1695 tmp = pci_find_upstream_pcie_bridge(pdev);
1698 /* Secondary interface's bus number and devfn 0 */
1699 parent = pdev->bus->self;
1700 while (parent != tmp) {
1701 ret = device_context_mapped(iommu, parent->bus->number,
1705 parent = parent->bus->self;
1707 if (pci_is_pcie(tmp))
1708 return device_context_mapped(iommu, tmp->subordinate->number,
1711 return device_context_mapped(iommu, tmp->bus->number,
1715 /* Returns a number of VTD pages, but aligned to MM page size */
1716 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1719 host_addr &= ~PAGE_MASK;
1720 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
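/*
 * Worked example (illustration only, 4KiB pages): a 0x20-byte buffer
 * starting at offset 0xff0 within a page crosses a page boundary, so
 * PAGE_ALIGN(0xff0 + 0x20) >> VTD_PAGE_SHIFT == 2 VT-d pages are needed
 * even though the buffer itself is only 32 bytes long.
 */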
1723 /* Return largest possible superpage level for a given mapping */
1724 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1725 unsigned long iov_pfn,
1726 unsigned long phy_pfn,
1727 unsigned long pages)
1729 int support, level = 1;
1730 unsigned long pfnmerge;
1732 support = domain->iommu_superpage;
1734 /* To use a large page, the virtual *and* physical addresses
1735 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1736 of them will mean we have to use smaller pages. So just
1737 merge them and check both at once. */
1738 pfnmerge = iov_pfn | phy_pfn;
1740 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1741 pages >>= VTD_STRIDE_SHIFT;
1744 pfnmerge >>= VTD_STRIDE_SHIFT;
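/*
 * Worked example (illustration only): if iov_pfn and phy_pfn are both
 * multiples of 512 (i.e. 2MiB-aligned with 4KiB pages) and at least 512
 * pages are being mapped, the merged pfn has no bits set below
 * VTD_STRIDE_SHIFT, so a 2MiB superpage (level 2) can be used, provided
 * domain->iommu_superpage reports hardware support for that level.
 */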
1751 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1752 struct scatterlist *sg, unsigned long phys_pfn,
1753 unsigned long nr_pages, int prot)
1755 struct dma_pte *first_pte = NULL, *pte = NULL;
1756 phys_addr_t uninitialized_var(pteval);
1757 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1758 unsigned long sg_res;
1759 unsigned int largepage_lvl = 0;
1760 unsigned long lvl_pages = 0;
1762 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1764 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1767 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1772 sg_res = nr_pages + 1;
1773 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1776 while (nr_pages > 0) {
1780 sg_res = aligned_nrpages(sg->offset, sg->length);
1781 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1782 sg->dma_length = sg->length;
1783 pteval = page_to_phys(sg_page(sg)) | prot;
1784 phys_pfn = pteval >> VTD_PAGE_SHIFT;
1788 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1790 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
1793 /* It is a large page */
1794 if (largepage_lvl > 1)
1795 pteval |= DMA_PTE_LARGE_PAGE;
1797 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1800 /* We don't need lock here, nobody else
1801 * touches the iova range
1803 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1805 static int dumps = 5;
1806 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1807 iov_pfn, tmp, (unsigned long long)pteval);
1810 debug_dma_dump_mappings(NULL);
1815 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1817 BUG_ON(nr_pages < lvl_pages);
1818 BUG_ON(sg_res < lvl_pages);
1820 nr_pages -= lvl_pages;
1821 iov_pfn += lvl_pages;
1822 phys_pfn += lvl_pages;
1823 pteval += lvl_pages * VTD_PAGE_SIZE;
1824 sg_res -= lvl_pages;
1826 /* If the next PTE would be the first in a new page, then we
1827 need to flush the cache on the entries we've just written.
1828 And then we'll need to recalculate 'pte', so clear it and
1829 let it get set again in the if (!pte) block above.
1831 If we're done (!nr_pages) we need to flush the cache too.
1833 Also if we've been setting superpages, we may need to
1834 recalculate 'pte' and switch back to smaller pages for the
1835 end of the mapping, if the trailing size is not enough to
1836 use another superpage (i.e. sg_res < lvl_pages). */
1838 if (!nr_pages || first_pte_in_page(pte) ||
1839 (largepage_lvl > 1 && sg_res < lvl_pages)) {
1840 domain_flush_cache(domain, first_pte,
1841 (void *)pte - (void *)first_pte);
1845 if (!sg_res && nr_pages)
1851 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1852 struct scatterlist *sg, unsigned long nr_pages,
1855 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1858 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1859 unsigned long phys_pfn, unsigned long nr_pages,
1862 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
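/*
 * Usage sketch (illustration only): mapping a single 4KiB page 1:1 into
 * a domain would look like
 *
 *	domain_pfn_mapping(domain, pfn, pfn, 1, DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * which walks pfn_to_dma_pte() down to the leaf level and installs one
 * read/write PTE, exactly what iommu_domain_identity_map() below does
 * for whole memory ranges.
 */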
1865 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1870 clear_context_table(iommu, bus, devfn);
1871 iommu->flush.flush_context(iommu, 0, 0, 0,
1872 DMA_CCMD_GLOBAL_INVL);
1873 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1876 static void domain_remove_dev_info(struct dmar_domain *domain)
1878 struct device_domain_info *info;
1879 unsigned long flags;
1880 struct intel_iommu *iommu;
1882 spin_lock_irqsave(&device_domain_lock, flags);
1883 while (!list_empty(&domain->devices)) {
1884 info = list_entry(domain->devices.next,
1885 struct device_domain_info, link);
1886 list_del(&info->link);
1887 list_del(&info->global);
1889 info->dev->dev.archdata.iommu = NULL;
1890 spin_unlock_irqrestore(&device_domain_lock, flags);
1892 iommu_disable_dev_iotlb(info);
1893 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1894 iommu_detach_dev(iommu, info->bus, info->devfn);
1895 free_devinfo_mem(info);
1897 spin_lock_irqsave(&device_domain_lock, flags);
1899 spin_unlock_irqrestore(&device_domain_lock, flags);
1904 * Note: struct pci_dev->dev.archdata.iommu stores the info
1906 static struct dmar_domain *
1907 find_domain(struct pci_dev *pdev)
1909 struct device_domain_info *info;
1911 /* No lock here, assumes no domain exit in normal case */
1912 info = pdev->dev.archdata.iommu;
1914 return info->domain;
1918 /* domain is initialized */
1919 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1921 struct dmar_domain *domain, *found = NULL;
1922 struct intel_iommu *iommu;
1923 struct dmar_drhd_unit *drhd;
1924 struct device_domain_info *info, *tmp;
1925 struct pci_dev *dev_tmp;
1926 unsigned long flags;
1927 int bus = 0, devfn = 0;
1931 domain = find_domain(pdev);
1935 segment = pci_domain_nr(pdev->bus);
1937 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1939 if (pci_is_pcie(dev_tmp)) {
1940 bus = dev_tmp->subordinate->number;
1943 bus = dev_tmp->bus->number;
1944 devfn = dev_tmp->devfn;
1946 spin_lock_irqsave(&device_domain_lock, flags);
1947 list_for_each_entry(info, &device_domain_list, global) {
1948 if (info->segment == segment &&
1949 info->bus == bus && info->devfn == devfn) {
1950 found = info->domain;
1954 spin_unlock_irqrestore(&device_domain_lock, flags);
1955 /* pcie-pci bridge already has a domain, use it */
1962 domain = alloc_domain();
1966 /* Allocate new domain for the device */
1967 drhd = dmar_find_matched_drhd_unit(pdev);
1969 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1973 iommu = drhd->iommu;
1975 ret = iommu_attach_domain(domain, iommu);
1977 free_domain_mem(domain);
1981 if (domain_init(domain, gaw)) {
1982 domain_exit(domain);
1986 /* register pcie-to-pci device */
1988 info = alloc_devinfo_mem();
1990 domain_exit(domain);
1993 info->segment = segment;
1995 info->devfn = devfn;
1997 info->domain = domain;
1998 /* This domain is shared by devices under p2p bridge */
1999 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2001 /* pcie-to-pci bridge already has a domain, use it */
2003 spin_lock_irqsave(&device_domain_lock, flags);
2004 list_for_each_entry(tmp, &device_domain_list, global) {
2005 if (tmp->segment == segment &&
2006 tmp->bus == bus && tmp->devfn == devfn) {
2007 found = tmp->domain;
2012 spin_unlock_irqrestore(&device_domain_lock, flags);
2013 free_devinfo_mem(info);
2014 domain_exit(domain);
2017 list_add(&info->link, &domain->devices);
2018 list_add(&info->global, &device_domain_list);
2019 spin_unlock_irqrestore(&device_domain_lock, flags);
2024 info = alloc_devinfo_mem();
2027 info->segment = segment;
2028 info->bus = pdev->bus->number;
2029 info->devfn = pdev->devfn;
2031 info->domain = domain;
2032 spin_lock_irqsave(&device_domain_lock, flags);
2033 /* somebody else was faster and already set it up */
2034 found = find_domain(pdev);
2035 if (found != NULL) {
2036 spin_unlock_irqrestore(&device_domain_lock, flags);
2037 if (found != domain) {
2038 domain_exit(domain);
2041 free_devinfo_mem(info);
2044 list_add(&info->link, &domain->devices);
2045 list_add(&info->global, &device_domain_list);
2046 pdev->dev.archdata.iommu = info;
2047 spin_unlock_irqrestore(&device_domain_lock, flags);
2050 /* recheck it here, maybe others set it */
2051 return find_domain(pdev);
2054 static int iommu_identity_mapping;
2055 #define IDENTMAP_ALL 1
2056 #define IDENTMAP_GFX 2
2057 #define IDENTMAP_AZALIA 4
2059 static int iommu_domain_identity_map(struct dmar_domain *domain,
2060 unsigned long long start,
2061 unsigned long long end)
2063 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2064 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2066 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2067 dma_to_mm_pfn(last_vpfn))) {
2068 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2072 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2073 start, end, domain->id);
2075 * RMRR range might have overlap with physical memory range; clear it first
2078 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2080 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2081 last_vpfn - first_vpfn + 1,
2082 DMA_PTE_READ|DMA_PTE_WRITE);
2085 static int iommu_prepare_identity_map(struct pci_dev *pdev,
2086 unsigned long long start,
2087 unsigned long long end)
2089 struct dmar_domain *domain;
2092 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2096 /* For _hardware_ passthrough, don't bother. But for software
2097 passthrough, we do it anyway -- it may indicate a memory
2098 range which is reserved in E820 and so didn't get set
2099 up to start with in si_domain */
2100 if (domain == si_domain && hw_pass_through) {
2101 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2102 pci_name(pdev), start, end);
2107 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2108 pci_name(pdev), start, end);
2111 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2112 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2113 dmi_get_system_info(DMI_BIOS_VENDOR),
2114 dmi_get_system_info(DMI_BIOS_VERSION),
2115 dmi_get_system_info(DMI_PRODUCT_VERSION));
2120 if (end >> agaw_to_width(domain->agaw)) {
2121 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2122 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2123 agaw_to_width(domain->agaw),
2124 dmi_get_system_info(DMI_BIOS_VENDOR),
2125 dmi_get_system_info(DMI_BIOS_VERSION),
2126 dmi_get_system_info(DMI_PRODUCT_VERSION));
2131 ret = iommu_domain_identity_map(domain, start, end);
2135 /* context entry init */
2136 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
2143 domain_exit(domain);
2147 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2148 struct pci_dev *pdev)
2150 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2152 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2156 #ifdef CONFIG_DMAR_FLOPPY_WA
2157 static inline void iommu_prepare_isa(void)
2159 struct pci_dev *pdev;
2162 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2166 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2167 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
2170 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2171 "floppy might not work\n");
2175 static inline void iommu_prepare_isa(void)
2179 #endif /* !CONFIG_DMAR_FLOPPY_WA */
2181 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2183 static int __init si_domain_work_fn(unsigned long start_pfn,
2184 unsigned long end_pfn, void *datax)
2188 *ret = iommu_domain_identity_map(si_domain,
2189 (uint64_t)start_pfn << PAGE_SHIFT,
2190 (uint64_t)end_pfn << PAGE_SHIFT);
2195 static int __init si_domain_init(int hw)
2197 struct dmar_drhd_unit *drhd;
2198 struct intel_iommu *iommu;
2201 si_domain = alloc_domain();
2205 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2207 for_each_active_iommu(iommu, drhd) {
2208 ret = iommu_attach_domain(si_domain, iommu);
2210 domain_exit(si_domain);
2215 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2216 domain_exit(si_domain);
2220 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2225 for_each_online_node(nid) {
2226 work_with_active_regions(nid, si_domain_work_fn, &ret);
2234 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2235 struct pci_dev *pdev);
2236 static int identity_mapping(struct pci_dev *pdev)
2238 struct device_domain_info *info;
2240 if (likely(!iommu_identity_mapping))
2243 info = pdev->dev.archdata.iommu;
2244 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2245 return (info->domain == si_domain);
2250 static int domain_add_dev_info(struct dmar_domain *domain,
2251 struct pci_dev *pdev,
2254 struct device_domain_info *info;
2255 unsigned long flags;
2258 info = alloc_devinfo_mem();
2262 ret = domain_context_mapping(domain, pdev, translation);
2264 free_devinfo_mem(info);
2268 info->segment = pci_domain_nr(pdev->bus);
2269 info->bus = pdev->bus->number;
2270 info->devfn = pdev->devfn;
2272 info->domain = domain;
2274 spin_lock_irqsave(&device_domain_lock, flags);
2275 list_add(&info->link, &domain->devices);
2276 list_add(&info->global, &device_domain_list);
2277 pdev->dev.archdata.iommu = info;
2278 spin_unlock_irqrestore(&device_domain_lock, flags);
2283 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2285 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2288 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2291 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2295 * We want to start off with all devices in the 1:1 domain, and
2296 * take them out later if we find they can't access all of memory.
2298 * However, we can't do this for PCI devices behind bridges,
2299 * because all PCI devices behind the same bridge will end up
2300 * with the same source-id on their transactions.
2302 * Practically speaking, we can't change things around for these
2303 * devices at run-time, because we can't be sure there'll be no
2304 * DMA transactions in flight for any of their siblings.
2306 * So PCI devices (unless they're on the root bus) as well as
2307 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2308 * the 1:1 domain, just in _case_ one of their siblings turns out
2309 * not to be able to map all of memory.
2311 if (!pci_is_pcie(pdev)) {
2312 if (!pci_is_root_bus(pdev->bus))
2314 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2316 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2320 * At boot time, we don't yet know if devices will be 64-bit capable.
2321 * Assume that they will -- if they turn out not to be, then we can
2322 * take them out of the 1:1 domain later.
2326 * If the device's dma_mask is less than the system's memory
2327 * size then this is not a candidate for identity mapping.
2329 u64 dma_mask = pdev->dma_mask;
2331 if (pdev->dev.coherent_dma_mask &&
2332 pdev->dev.coherent_dma_mask < dma_mask)
2333 dma_mask = pdev->dev.coherent_dma_mask;
2335 return dma_mask >= dma_get_required_mask(&pdev->dev);
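/*
 * Example (illustration only): on a machine where populated RAM extends
 * above 4GiB, dma_get_required_mask() exceeds DMA_BIT_MASK(32), so a
 * device limited to 32-bit DMA fails this test and is taken out of the
 * 1:1 identity domain, while a 64-bit capable device passes and stays
 * identity mapped.
 */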
2341 static int __init iommu_prepare_static_identity_mapping(int hw)
2343 struct pci_dev *pdev = NULL;
2346 ret = si_domain_init(hw);
2350 for_each_pci_dev(pdev) {
2351 /* Skip Host/PCI Bridge devices */
2352 if (IS_BRIDGE_HOST_DEVICE(pdev))
2354 if (iommu_should_identity_map(pdev, 1)) {
2355 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2356 hw ? "hardware" : "software", pci_name(pdev));
2358 ret = domain_add_dev_info(si_domain, pdev,
2359 hw ? CONTEXT_TT_PASS_THROUGH :
2360 CONTEXT_TT_MULTI_LEVEL);
2369 static int __init init_dmars(void)
2371 struct dmar_drhd_unit *drhd;
2372 struct dmar_rmrr_unit *rmrr;
2373 struct pci_dev *pdev;
2374 struct intel_iommu *iommu;
2380 * initialize and program root entry to not present
2383 for_each_drhd_unit(drhd) {
2386 * lock not needed as this is only incremented in the single-
2387 * threaded kernel __init code path; all other accesses are read-only
2392 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2395 printk(KERN_ERR "Allocating global iommu array failed\n");
2400 deferred_flush = kzalloc(g_num_of_iommus *
2401 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2402 if (!deferred_flush) {
2407 for_each_drhd_unit(drhd) {
2411 iommu = drhd->iommu;
2412 g_iommus[iommu->seq_id] = iommu;
2414 ret = iommu_init_domains(iommu);
2420 * we could share the same root & context tables
2421 * among all IOMMUs. Need to split it later.
2423 ret = iommu_alloc_root_entry(iommu);
2425 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2428 if (!ecap_pass_through(iommu->ecap))
2429 hw_pass_through = 0;
2433 * Start from the sane iommu hardware state.
2435 for_each_drhd_unit(drhd) {
2439 iommu = drhd->iommu;
2442 * If the queued invalidation is already initialized by us
2443 * (for example, while enabling interrupt-remapping) then
2444 * things are already rolling from a sane state.
2450 * Clear any previous faults.
2452 dmar_fault(-1, iommu);
2454 * Disable queued invalidation if supported and already enabled
2455 * before OS handover.
2457 dmar_disable_qi(iommu);
2460 for_each_drhd_unit(drhd) {
2464 iommu = drhd->iommu;
2466 if (dmar_enable_qi(iommu)) {
2468 * Queued invalidation is not enabled; use register-based invalidation instead.
2471 iommu->flush.flush_context = __iommu_flush_context;
2472 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2473 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2476 (unsigned long long)drhd->reg_base_addr);
2478 iommu->flush.flush_context = qi_flush_context;
2479 iommu->flush.flush_iotlb = qi_flush_iotlb;
2480 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2483 (unsigned long long)drhd->reg_base_addr);
2487 if (iommu_pass_through)
2488 iommu_identity_mapping |= IDENTMAP_ALL;
2490 #ifdef CONFIG_DMAR_BROKEN_GFX_WA
2491 iommu_identity_mapping |= IDENTMAP_GFX;
2494 check_tylersburg_isoch();
2497 * If pass-through is not set or not enabled, set up context entries for
2498 * identity mappings for rmrr, gfx and isa, and possibly fall back to the
2499 * static identity mapping if iommu_identity_mapping is set.
2501 if (iommu_identity_mapping) {
2502 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2504 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2510 * for each dev attached to rmrr
2512 * locate drhd for dev, alloc domain for dev
2513 * allocate free domain
2514 * allocate page table entries for rmrr
2515 * if context not allocated for bus
2516 * allocate and init context
2517 * set present in root table for this bus
2518 * init context with domain, translation etc
2522 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2523 for_each_rmrr_units(rmrr) {
2524 for (i = 0; i < rmrr->devices_cnt; i++) {
2525 pdev = rmrr->devices[i];
2527 * some BIOSes list non-existent devices in the DMAR table
2532 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2535 "IOMMU: mapping reserved region failed\n");
2539 iommu_prepare_isa();
2544 * global invalidate context cache
2545 * global invalidate iotlb
2546 * enable translation
2548 for_each_drhd_unit(drhd) {
2549 if (drhd->ignored) {
2551 * we always have to disable PMRs or DMA may fail on this device
2555 iommu_disable_protect_mem_regions(drhd->iommu);
2558 iommu = drhd->iommu;
2560 iommu_flush_write_buffer(iommu);
2562 ret = dmar_set_interrupt(iommu);
2566 iommu_set_root_entry(iommu);
2568 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2569 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2571 ret = iommu_enable_translation(iommu);
2575 iommu_disable_protect_mem_regions(iommu);
2580 for_each_drhd_unit(drhd) {
2583 iommu = drhd->iommu;
2590 /* This takes a number of _MM_ pages, not VTD pages */
2591 static struct iova *intel_alloc_iova(struct device *dev,
2592 struct dmar_domain *domain,
2593 unsigned long nrpages, uint64_t dma_mask)
2595 struct pci_dev *pdev = to_pci_dev(dev);
2596 struct iova *iova = NULL;
2598 /* Restrict dma_mask to the width that the iommu can handle */
2599 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2601 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2603 * First try to allocate an io virtual address in
2604 * DMA_BIT_MASK(32), and if that fails then try allocating from the higher range
2607 iova = alloc_iova(&domain->iovad, nrpages,
2608 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2612 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2613 if (unlikely(!iova)) {
2614 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2615 nrpages, pci_name(pdev));
2622 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2624 struct dmar_domain *domain;
2627 domain = get_domain_for_dev(pdev,
2628 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2631 "Allocating domain for %s failed", pci_name(pdev));
2635 /* make sure context mapping is ok */
2636 if (unlikely(!domain_context_mapped(pdev))) {
2637 ret = domain_context_mapping(domain, pdev,
2638 CONTEXT_TT_MULTI_LEVEL);
2641 "Domain context map for %s failed",
2650 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2652 struct device_domain_info *info;
2654 /* No lock here, assumes no domain exit in normal case */
2655 info = dev->dev.archdata.iommu;
2657 return info->domain;
2659 return __get_valid_domain_for_dev(dev);
2662 static int iommu_dummy(struct pci_dev *pdev)
2664 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2667 /* Check if the pdev needs to go through the non-identity map and unmap process. */
2668 static int iommu_no_mapping(struct device *dev)
2670 struct pci_dev *pdev;
2673 if (unlikely(dev->bus != &pci_bus_type))
2676 pdev = to_pci_dev(dev);
2677 if (iommu_dummy(pdev))
2680 if (!iommu_identity_mapping)
2683 found = identity_mapping(pdev);
2685 if (iommu_should_identity_map(pdev, 0))
2689 * 32 bit DMA device is removed from si_domain and falls back
2690 * to non-identity mapping.
2692 domain_remove_one_dev_info(si_domain, pdev);
2693 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2699 * If a 64 bit DMA device has been detached from a VM, put the device
2700 * back into si_domain for identity mapping.
2702 if (iommu_should_identity_map(pdev, 0)) {
2704 ret = domain_add_dev_info(si_domain, pdev,
2706 CONTEXT_TT_PASS_THROUGH :
2707 CONTEXT_TT_MULTI_LEVEL);
2709 printk(KERN_INFO "64bit %s uses identity mapping\n",
2719 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2720 size_t size, int dir, u64 dma_mask)
2722 struct pci_dev *pdev = to_pci_dev(hwdev);
2723 struct dmar_domain *domain;
2724 phys_addr_t start_paddr;
2728 struct intel_iommu *iommu;
2729 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2731 BUG_ON(dir == DMA_NONE);
2733 if (iommu_no_mapping(hwdev))
2736 domain = get_valid_domain_for_dev(pdev);
2740 iommu = domain_get_iommu(domain);
2741 size = aligned_nrpages(paddr, size);
2743 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2748 * Check if DMAR supports zero-length reads on write-only mappings
2751 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2752 !cap_zlr(iommu->cap))
2753 prot |= DMA_PTE_READ;
2754 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2755 prot |= DMA_PTE_WRITE;
2757 * The range paddr .. (paddr + size) might span a partial page; we should map
2758 * the whole page. Note: if two parts of one page are mapped separately, we
2759 * might have two guest_addr mappings to the same host paddr, but this
2760 * is not a big problem
2762 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2763 mm_to_dma_pfn(paddr_pfn), size, prot);
2767 /* it's a non-present to present mapping. Only flush if caching mode */
2768 if (cap_caching_mode(iommu->cap))
2769 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
2771 iommu_flush_write_buffer(iommu);
2773 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2774 start_paddr += paddr & ~PAGE_MASK;
2779 __free_iova(&domain->iovad, iova);
2780 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
2781 pci_name(pdev), size, (unsigned long long)paddr, dir);
2785 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2786 unsigned long offset, size_t size,
2787 enum dma_data_direction dir,
2788 struct dma_attrs *attrs)
2790 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2791 dir, to_pci_dev(dev)->dma_mask);
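/*
 * Illustrative sketch, not part of this driver: the streaming DMA API calls
 * that land in intel_map_page() above once intel_dma_ops is installed as the
 * global dma_ops (see intel_iommu_init() below).  The function, device and
 * buffer are hypothetical; dma_map_single(), dma_mapping_error() and
 * dma_unmap_single() are the standard DMA API.
 */
#if 0	/* example only, never compiled */
static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* resolves to intel_map_page() for the page backing "buf" */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with "handle" and wait for it to finish ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif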
2794 static void flush_unmaps(void)
2800 /* just flush them all */
2801 for (i = 0; i < g_num_of_iommus; i++) {
2802 struct intel_iommu *iommu = g_iommus[i];
2806 if (!deferred_flush[i].next)
2809 /* In caching mode, global flushes make emulation expensive */
2810 if (!cap_caching_mode(iommu->cap))
2811 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2812 DMA_TLB_GLOBAL_FLUSH);
2813 for (j = 0; j < deferred_flush[i].next; j++) {
2815 struct iova *iova = deferred_flush[i].iova[j];
2816 struct dmar_domain *domain = deferred_flush[i].domain[j];
2818 /* On real hardware multiple invalidations are expensive */
2819 if (cap_caching_mode(iommu->cap))
2820 iommu_flush_iotlb_psi(iommu, domain->id,
2821 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2823 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2824 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2825 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2827 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2829 deferred_flush[i].next = 0;
2835 static void flush_unmaps_timeout(unsigned long data)
2837 unsigned long flags;
2839 spin_lock_irqsave(&async_umap_flush_lock, flags);
2841 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2844 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2846 unsigned long flags;
2848 struct intel_iommu *iommu;
2850 spin_lock_irqsave(&async_umap_flush_lock, flags);
2851 if (list_size == HIGH_WATER_MARK)
2854 iommu = domain_get_iommu(dom);
2855 iommu_id = iommu->seq_id;
2857 next = deferred_flush[iommu_id].next;
2858 deferred_flush[iommu_id].domain[next] = dom;
2859 deferred_flush[iommu_id].iova[next] = iova;
2860 deferred_flush[iommu_id].next++;
2863 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2867 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2870 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2871 size_t size, enum dma_data_direction dir,
2872 struct dma_attrs *attrs)
2874 struct pci_dev *pdev = to_pci_dev(dev);
2875 struct dmar_domain *domain;
2876 unsigned long start_pfn, last_pfn;
2878 struct intel_iommu *iommu;
2880 if (iommu_no_mapping(dev))
2883 domain = find_domain(pdev);
2886 iommu = domain_get_iommu(domain);
2888 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2889 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2890 (unsigned long long)dev_addr))
2893 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2894 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2896 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2897 pci_name(pdev), start_pfn, last_pfn);
2899 /* clear the whole page */
2900 dma_pte_clear_range(domain, start_pfn, last_pfn);
2902 /* free page tables */
2903 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2905 if (intel_iommu_strict) {
2906 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2907 last_pfn - start_pfn + 1, 0);
2909 __free_iova(&domain->iovad, iova);
2911 add_unmap(domain, iova);
2913 * queue up the release of the unmap to save the 1/6th of the
2914 * CPU time used up by the iotlb flush operation...
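/*
 * Note on the unmap path above: with intel_iommu_strict set, the IOTLB is
 * invalidated synchronously for every unmap.  Otherwise the iova is queued
 * through add_unmap() and reclaimed in batches by flush_unmaps(), either
 * when HIGH_WATER_MARK entries have accumulated or when the unmap_timer
 * fires (10ms, see add_unmap()).  Strict behaviour can be requested on the
 * kernel command line with "intel_iommu=strict".
 */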
2919 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2920 dma_addr_t *dma_handle, gfp_t flags)
2925 size = PAGE_ALIGN(size);
2926 order = get_order(size);
2928 if (!iommu_no_mapping(hwdev))
2929 flags &= ~(GFP_DMA | GFP_DMA32);
2930 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2931 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2937 vaddr = (void *)__get_free_pages(flags, order);
2940 memset(vaddr, 0, size);
2942 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2944 hwdev->coherent_dma_mask);
2947 free_pages((unsigned long)vaddr, order);
2951 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2952 dma_addr_t dma_handle)
2956 size = PAGE_ALIGN(size);
2957 order = get_order(size);
2959 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
2960 free_pages((unsigned long)vaddr, order);
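/*
 * Illustrative sketch, not part of this driver: the coherent DMA API calls
 * that reach intel_alloc_coherent()/intel_free_coherent() above.  The
 * function and device are hypothetical; dma_alloc_coherent() and
 * dma_free_coherent() are the standard DMA API.
 */
#if 0	/* example only, never compiled */
static int example_coherent_ring(struct device *dev)
{
	dma_addr_t handle;
	void *ring;

	/* one page of coherent memory, e.g. for a descriptor ring */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... give "handle" to the device, access "ring" from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, handle);
	return 0;
}
#endif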
2963 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2964 int nelems, enum dma_data_direction dir,
2965 struct dma_attrs *attrs)
2967 struct pci_dev *pdev = to_pci_dev(hwdev);
2968 struct dmar_domain *domain;
2969 unsigned long start_pfn, last_pfn;
2971 struct intel_iommu *iommu;
2973 if (iommu_no_mapping(hwdev))
2976 domain = find_domain(pdev);
2979 iommu = domain_get_iommu(domain);
2981 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2982 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2983 (unsigned long long)sglist[0].dma_address))
2986 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2987 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2989 /* clear the whole page */
2990 dma_pte_clear_range(domain, start_pfn, last_pfn);
2992 /* free page tables */
2993 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2995 if (intel_iommu_strict) {
2996 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2997 last_pfn - start_pfn + 1, 0);
2999 __free_iova(&domain->iovad, iova);
3001 add_unmap(domain, iova);
3003 * queue up the release of the unmap to save the 1/6th of the
3004 * CPU time used up by the iotlb flush operation...
3009 static int intel_nontranslate_map_sg(struct device *hddev,
3010 struct scatterlist *sglist, int nelems, int dir)
3013 struct scatterlist *sg;
3015 for_each_sg(sglist, sg, nelems, i) {
3016 BUG_ON(!sg_page(sg));
3017 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3018 sg->dma_length = sg->length;
3023 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3024 enum dma_data_direction dir, struct dma_attrs *attrs)
3027 struct pci_dev *pdev = to_pci_dev(hwdev);
3028 struct dmar_domain *domain;
3031 struct iova *iova = NULL;
3033 struct scatterlist *sg;
3034 unsigned long start_vpfn;
3035 struct intel_iommu *iommu;
3037 BUG_ON(dir == DMA_NONE);
3038 if (iommu_no_mapping(hwdev))
3039 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
3041 domain = get_valid_domain_for_dev(pdev);
3045 iommu = domain_get_iommu(domain);
3047 for_each_sg(sglist, sg, nelems, i)
3048 size += aligned_nrpages(sg->offset, sg->length);
3050 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3053 sglist->dma_length = 0;
3058 * Check if DMAR supports zero-length reads on write-only mappings
3061 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3062 !cap_zlr(iommu->cap))
3063 prot |= DMA_PTE_READ;
3064 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3065 prot |= DMA_PTE_WRITE;
3067 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3069 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3070 if (unlikely(ret)) {
3071 /* clear the page */
3072 dma_pte_clear_range(domain, start_vpfn,
3073 start_vpfn + size - 1);
3074 /* free page tables */
3075 dma_pte_free_pagetable(domain, start_vpfn,
3076 start_vpfn + size - 1);
3078 __free_iova(&domain->iovad, iova);
3082 /* it's a non-present to present mapping. Only flush if caching mode */
3083 if (cap_caching_mode(iommu->cap))
3084 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
3086 iommu_flush_write_buffer(iommu);
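/*
 * Illustrative sketch, not part of this driver: scatter-gather mapping
 * through the DMA API, which ends up in intel_map_sg()/intel_unmap_sg()
 * above.  The function, device and page array are hypothetical; the
 * scatterlist helpers and dma_map_sg()/dma_unmap_sg() are the standard API.
 */
#if 0	/* example only, never compiled */
static int example_map_pages(struct device *dev, struct page **pages, int n)
{
	struct scatterlist *sgl;
	int i, mapped, ret = 0;

	sgl = kcalloc(n, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return -ENOMEM;

	sg_init_table(sgl, n);
	for (i = 0; i < n; i++)
		sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);

	mapped = dma_map_sg(dev, sgl, n, DMA_FROM_DEVICE);
	if (!mapped) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... program the device via sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sgl, n, DMA_FROM_DEVICE);
out:
	kfree(sgl);
	return ret;
}
#endif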
3091 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3096 struct dma_map_ops intel_dma_ops = {
3097 .alloc_coherent = intel_alloc_coherent,
3098 .free_coherent = intel_free_coherent,
3099 .map_sg = intel_map_sg,
3100 .unmap_sg = intel_unmap_sg,
3101 .map_page = intel_map_page,
3102 .unmap_page = intel_unmap_page,
3103 .mapping_error = intel_mapping_error,
3106 static inline int iommu_domain_cache_init(void)
3110 iommu_domain_cache = kmem_cache_create("iommu_domain",
3111 sizeof(struct dmar_domain),
3116 if (!iommu_domain_cache) {
3117 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3124 static inline int iommu_devinfo_cache_init(void)
3128 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3129 sizeof(struct device_domain_info),
3133 if (!iommu_devinfo_cache) {
3134 printk(KERN_ERR "Couldn't create devinfo cache\n");
3141 static inline int iommu_iova_cache_init(void)
3145 iommu_iova_cache = kmem_cache_create("iommu_iova",
3146 sizeof(struct iova),
3150 if (!iommu_iova_cache) {
3151 printk(KERN_ERR "Couldn't create iova cache\n");
3158 static int __init iommu_init_mempool(void)
3161 ret = iommu_iova_cache_init();
3165 ret = iommu_domain_cache_init();
3169 ret = iommu_devinfo_cache_init();
3173 kmem_cache_destroy(iommu_domain_cache);
3175 kmem_cache_destroy(iommu_iova_cache);
3180 static void __init iommu_exit_mempool(void)
3182 kmem_cache_destroy(iommu_devinfo_cache);
3183 kmem_cache_destroy(iommu_domain_cache);
3184 kmem_cache_destroy(iommu_iova_cache);
3188 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3190 struct dmar_drhd_unit *drhd;
3194 /* We know that this device on this chipset has its own IOMMU.
3195 * If we find it under a different IOMMU, then the BIOS is lying
3196 * to us. Hope that the IOMMU for this device is actually
3197 * disabled, and it needs no translation...
3199 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3201 /* "can't" happen */
3202 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3205 vtbar &= 0xffff0000;
3207 /* we know that this iommu should be at offset 0xa000 from vtbar */
3208 drhd = dmar_find_matched_drhd_unit(pdev);
3209 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3210 TAINT_FIRMWARE_WORKAROUND,
3211 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3212 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3214 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3216 static void __init init_no_remapping_devices(void)
3218 struct dmar_drhd_unit *drhd;
3220 for_each_drhd_unit(drhd) {
3221 if (!drhd->include_all) {
3223 for (i = 0; i < drhd->devices_cnt; i++)
3224 if (drhd->devices[i] != NULL)
3226 /* ignore DMAR unit if no pci devices exist */
3227 if (i == drhd->devices_cnt)
3232 for_each_drhd_unit(drhd) {
3234 if (drhd->ignored || drhd->include_all)
3237 for (i = 0; i < drhd->devices_cnt; i++)
3238 if (drhd->devices[i] &&
3239 !IS_GFX_DEVICE(drhd->devices[i]))
3242 if (i < drhd->devices_cnt)
3245 /* This IOMMU has *only* gfx devices. Either bypass it or
3246 set the gfx_mapped flag, as appropriate */
3248 intel_iommu_gfx_mapped = 1;
3251 for (i = 0; i < drhd->devices_cnt; i++) {
3252 if (!drhd->devices[i])
3254 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3260 #ifdef CONFIG_SUSPEND
3261 static int init_iommu_hw(void)
3263 struct dmar_drhd_unit *drhd;
3264 struct intel_iommu *iommu = NULL;
3266 for_each_active_iommu(iommu, drhd)
3268 dmar_reenable_qi(iommu);
3270 for_each_iommu(iommu, drhd) {
3271 if (drhd->ignored) {
3273 * we always have to disable PMRs or DMA may fail on this device
3277 iommu_disable_protect_mem_regions(iommu);
3281 iommu_flush_write_buffer(iommu);
3283 iommu_set_root_entry(iommu);
3285 iommu->flush.flush_context(iommu, 0, 0, 0,
3286 DMA_CCMD_GLOBAL_INVL);
3287 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3288 DMA_TLB_GLOBAL_FLUSH);
3289 if (iommu_enable_translation(iommu))
3291 iommu_disable_protect_mem_regions(iommu);
3297 static void iommu_flush_all(void)
3299 struct dmar_drhd_unit *drhd;
3300 struct intel_iommu *iommu;
3302 for_each_active_iommu(iommu, drhd) {
3303 iommu->flush.flush_context(iommu, 0, 0, 0,
3304 DMA_CCMD_GLOBAL_INVL);
3305 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3306 DMA_TLB_GLOBAL_FLUSH);
3310 static int iommu_suspend(void)
3312 struct dmar_drhd_unit *drhd;
3313 struct intel_iommu *iommu = NULL;
3316 for_each_active_iommu(iommu, drhd) {
3317 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3319 if (!iommu->iommu_state)
3325 for_each_active_iommu(iommu, drhd) {
3326 iommu_disable_translation(iommu);
3328 spin_lock_irqsave(&iommu->register_lock, flag);
3330 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3331 readl(iommu->reg + DMAR_FECTL_REG);
3332 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3333 readl(iommu->reg + DMAR_FEDATA_REG);
3334 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3335 readl(iommu->reg + DMAR_FEADDR_REG);
3336 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3337 readl(iommu->reg + DMAR_FEUADDR_REG);
3339 spin_unlock_irqrestore(&iommu->register_lock, flag);
3344 for_each_active_iommu(iommu, drhd)
3345 kfree(iommu->iommu_state);
3350 static void iommu_resume(void)
3352 struct dmar_drhd_unit *drhd;
3353 struct intel_iommu *iommu = NULL;
3356 if (init_iommu_hw()) {
3358 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3360 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3364 for_each_active_iommu(iommu, drhd) {
3366 spin_lock_irqsave(&iommu->register_lock, flag);
3368 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3369 iommu->reg + DMAR_FECTL_REG);
3370 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3371 iommu->reg + DMAR_FEDATA_REG);
3372 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3373 iommu->reg + DMAR_FEADDR_REG);
3374 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3375 iommu->reg + DMAR_FEUADDR_REG);
3377 spin_unlock_irqrestore(&iommu->register_lock, flag);
3380 for_each_active_iommu(iommu, drhd)
3381 kfree(iommu->iommu_state);
3384 static struct syscore_ops iommu_syscore_ops = {
3385 .resume = iommu_resume,
3386 .suspend = iommu_suspend,
3389 static void __init init_iommu_pm_ops(void)
3391 register_syscore_ops(&iommu_syscore_ops);
3395 static inline void init_iommu_pm_ops(void) {}
3396 #endif /* CONFIG_PM */
3399 * Here we only respond to the unbinding of a device from its driver.
3401 * A newly added device is not attached to its DMAR domain here yet; that
3402 * will happen when the device is first mapped to an iova.
3404 static int device_notifier(struct notifier_block *nb,
3405 unsigned long action, void *data)
3407 struct device *dev = data;
3408 struct pci_dev *pdev = to_pci_dev(dev);
3409 struct dmar_domain *domain;
3411 if (iommu_no_mapping(dev))
3414 domain = find_domain(pdev);
3418 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
3419 domain_remove_one_dev_info(domain, pdev);
3421 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3422 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3423 list_empty(&domain->devices))
3424 domain_exit(domain);
3430 static struct notifier_block device_nb = {
3431 .notifier_call = device_notifier,
3434 int __init intel_iommu_init(void)
3438 /* VT-d is required for a TXT/tboot launch, so enforce that */
3439 force_on = tboot_force_iommu();
3441 if (dmar_table_init()) {
3443 panic("tboot: Failed to initialize DMAR table\n");
3447 if (dmar_dev_scope_init()) {
3449 panic("tboot: Failed to initialize DMAR device scope\n");
3454 * Check the need for DMA-remapping initialization now.
3455 * The above initialization will also be used by interrupt remapping.
3457 if (no_iommu || dmar_disabled)
3460 if (iommu_init_mempool()) {
3462 panic("tboot: Failed to initialize iommu memory\n");
3466 if (dmar_init_reserved_ranges()) {
3468 panic("tboot: Failed to reserve iommu ranges\n");
3472 init_no_remapping_devices();
3477 panic("tboot: Failed to initialize DMARs\n");
3478 printk(KERN_ERR "IOMMU: dmar init failed\n");
3479 put_iova_domain(&reserved_iova_list);
3480 iommu_exit_mempool();
3484 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3486 init_timer(&unmap_timer);
3487 #ifdef CONFIG_SWIOTLB
3490 dma_ops = &intel_dma_ops;
3492 init_iommu_pm_ops();
3494 register_iommu(&intel_iommu_ops);
3496 bus_register_notifier(&pci_bus_type, &device_nb);
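/*
 * Note on intel_iommu_init() above: whether this path runs at all is
 * controlled from the kernel command line.  "intel_iommu=off" (or a global
 * no_iommu setting) skips DMA remapping entirely, "intel_iommu=strict"
 * forces synchronous IOTLB flushing in the unmap paths, and on x86
 * "iommu=pt" sets iommu_pass_through so that capable hardware uses
 * pass-through context entries instead of full translation.
 */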
3501 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3502 struct pci_dev *pdev)
3504 struct pci_dev *tmp, *parent;
3506 if (!iommu || !pdev)
3509 /* dependent device detach */
3510 tmp = pci_find_upstream_pcie_bridge(pdev);
3511 /* Secondary interface's bus number and devfn 0 */
3513 parent = pdev->bus->self;
3514 while (parent != tmp) {
3515 iommu_detach_dev(iommu, parent->bus->number,
3517 parent = parent->bus->self;
3519 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
3520 iommu_detach_dev(iommu,
3521 tmp->subordinate->number, 0);
3522 else /* this is a legacy PCI bridge */
3523 iommu_detach_dev(iommu, tmp->bus->number,
3528 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3529 struct pci_dev *pdev)
3531 struct device_domain_info *info;
3532 struct intel_iommu *iommu;
3533 unsigned long flags;
3535 struct list_head *entry, *tmp;
3537 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3542 spin_lock_irqsave(&device_domain_lock, flags);
3543 list_for_each_safe(entry, tmp, &domain->devices) {
3544 info = list_entry(entry, struct device_domain_info, link);
3545 if (info->segment == pci_domain_nr(pdev->bus) &&
3546 info->bus == pdev->bus->number &&
3547 info->devfn == pdev->devfn) {
3548 list_del(&info->link);
3549 list_del(&info->global);
3551 info->dev->dev.archdata.iommu = NULL;
3552 spin_unlock_irqrestore(&device_domain_lock, flags);
3554 iommu_disable_dev_iotlb(info);
3555 iommu_detach_dev(iommu, info->bus, info->devfn);
3556 iommu_detach_dependent_devices(iommu, pdev);
3557 free_devinfo_mem(info);
3559 spin_lock_irqsave(&device_domain_lock, flags);
3567 /* if there are no other devices under the same iommu
3568 * owned by this domain, clear this iommu in iommu_bmp and
3569 * update the iommu count and coherency
3571 if (iommu == device_to_iommu(info->segment, info->bus,
3577 unsigned long tmp_flags;
3578 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3579 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3580 domain->iommu_count--;
3581 domain_update_iommu_cap(domain);
3582 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3584 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3585 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3586 spin_lock_irqsave(&iommu->lock, tmp_flags);
3587 clear_bit(domain->id, iommu->domain_ids);
3588 iommu->domains[domain->id] = NULL;
3589 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3593 spin_unlock_irqrestore(&device_domain_lock, flags);
3596 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3598 struct device_domain_info *info;
3599 struct intel_iommu *iommu;
3600 unsigned long flags1, flags2;
3602 spin_lock_irqsave(&device_domain_lock, flags1);
3603 while (!list_empty(&domain->devices)) {
3604 info = list_entry(domain->devices.next,
3605 struct device_domain_info, link);
3606 list_del(&info->link);
3607 list_del(&info->global);
3609 info->dev->dev.archdata.iommu = NULL;
3611 spin_unlock_irqrestore(&device_domain_lock, flags1);
3613 iommu_disable_dev_iotlb(info);
3614 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3615 iommu_detach_dev(iommu, info->bus, info->devfn);
3616 iommu_detach_dependent_devices(iommu, info->dev);
3618 /* clear this iommu in iommu_bmp, update iommu count and coherency
3621 spin_lock_irqsave(&domain->iommu_lock, flags2);
3622 if (test_and_clear_bit(iommu->seq_id,
3623 &domain->iommu_bmp)) {
3624 domain->iommu_count--;
3625 domain_update_iommu_cap(domain);
3627 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3629 free_devinfo_mem(info);
3630 spin_lock_irqsave(&device_domain_lock, flags1);
3632 spin_unlock_irqrestore(&device_domain_lock, flags1);
3635 /* domain id counter for virtual machine domains; these ids are not written into context entries */
3636 static unsigned long vm_domid;
3638 static struct dmar_domain *iommu_alloc_vm_domain(void)
3640 struct dmar_domain *domain;
3642 domain = alloc_domain_mem();
3646 domain->id = vm_domid++;
3648 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3649 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3654 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3658 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3659 spin_lock_init(&domain->iommu_lock);
3661 domain_reserve_special_ranges(domain);
3663 /* calculate AGAW */
3664 domain->gaw = guest_width;
3665 adjust_width = guestwidth_to_adjustwidth(guest_width);
3666 domain->agaw = width_to_agaw(adjust_width);
3668 INIT_LIST_HEAD(&domain->devices);
3670 domain->iommu_count = 0;
3671 domain->iommu_coherency = 0;
3672 domain->iommu_snooping = 0;
3673 domain->iommu_superpage = 0;
3674 domain->max_addr = 0;
3677 /* always allocate the top pgd */
3678 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
3681 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3685 static void iommu_free_vm_domain(struct dmar_domain *domain)
3687 unsigned long flags;
3688 struct dmar_drhd_unit *drhd;
3689 struct intel_iommu *iommu;
3691 unsigned long ndomains;
3693 for_each_drhd_unit(drhd) {
3696 iommu = drhd->iommu;
3698 ndomains = cap_ndoms(iommu->cap);
3699 for_each_set_bit(i, iommu->domain_ids, ndomains) {
3700 if (iommu->domains[i] == domain) {
3701 spin_lock_irqsave(&iommu->lock, flags);
3702 clear_bit(i, iommu->domain_ids);
3703 iommu->domains[i] = NULL;
3704 spin_unlock_irqrestore(&iommu->lock, flags);
3711 static void vm_domain_exit(struct dmar_domain *domain)
3713 /* Domain 0 is reserved, so don't process it */
3717 vm_domain_remove_all_dev_info(domain);
3719 put_iova_domain(&domain->iovad);
3722 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3724 /* free page tables */
3725 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3727 iommu_free_vm_domain(domain);
3728 free_domain_mem(domain);
3731 static int intel_iommu_domain_init(struct iommu_domain *domain)
3733 struct dmar_domain *dmar_domain;
3735 dmar_domain = iommu_alloc_vm_domain();
3738 "intel_iommu_domain_init: dmar_domain == NULL\n");
3741 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3743 "intel_iommu_domain_init() failed\n");
3744 vm_domain_exit(dmar_domain);
3747 domain->priv = dmar_domain;
3752 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3754 struct dmar_domain *dmar_domain = domain->priv;
3756 domain->priv = NULL;
3757 vm_domain_exit(dmar_domain);
3760 static int intel_iommu_attach_device(struct iommu_domain *domain,
3763 struct dmar_domain *dmar_domain = domain->priv;
3764 struct pci_dev *pdev = to_pci_dev(dev);
3765 struct intel_iommu *iommu;
3768 /* normally pdev is not mapped */
3769 if (unlikely(domain_context_mapped(pdev))) {
3770 struct dmar_domain *old_domain;
3772 old_domain = find_domain(pdev);
3774 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3775 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3776 domain_remove_one_dev_info(old_domain, pdev);
3778 domain_remove_dev_info(old_domain);
3782 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3787 /* check if this iommu agaw is sufficient for max mapped address */
3788 addr_width = agaw_to_width(iommu->agaw);
3789 if (addr_width > cap_mgaw(iommu->cap))
3790 addr_width = cap_mgaw(iommu->cap);
3792 if (dmar_domain->max_addr > (1LL << addr_width)) {
3793 printk(KERN_ERR "%s: iommu width (%d) is not "
3794 "sufficient for the mapped address (%llx)\n",
3795 __func__, addr_width, dmar_domain->max_addr);
3798 dmar_domain->gaw = addr_width;
3801 * Knock out extra levels of page tables if necessary
3803 while (iommu->agaw < dmar_domain->agaw) {
3804 struct dma_pte *pte;
3806 pte = dmar_domain->pgd;
3807 if (dma_pte_present(pte)) {
3808 dmar_domain->pgd = (struct dma_pte *)
3809 phys_to_virt(dma_pte_addr(pte));
3810 free_pgtable_page(pte);
3812 dmar_domain->agaw--;
3815 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3818 static void intel_iommu_detach_device(struct iommu_domain *domain,
3821 struct dmar_domain *dmar_domain = domain->priv;
3822 struct pci_dev *pdev = to_pci_dev(dev);
3824 domain_remove_one_dev_info(dmar_domain, pdev);
3827 static int intel_iommu_map(struct iommu_domain *domain,
3828 unsigned long iova, phys_addr_t hpa,
3829 int gfp_order, int iommu_prot)
3831 struct dmar_domain *dmar_domain = domain->priv;
3837 if (iommu_prot & IOMMU_READ)
3838 prot |= DMA_PTE_READ;
3839 if (iommu_prot & IOMMU_WRITE)
3840 prot |= DMA_PTE_WRITE;
3841 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3842 prot |= DMA_PTE_SNP;
3844 size = PAGE_SIZE << gfp_order;
3845 max_addr = iova + size;
3846 if (dmar_domain->max_addr < max_addr) {
3849 /* check if minimum agaw is sufficient for mapped address */
3850 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
3851 if (end < max_addr) {
3852 printk(KERN_ERR "%s: iommu width (%d) is not "
3853 "sufficient for the mapped address (%llx)\n",
3854 __func__, dmar_domain->gaw, max_addr);
3857 dmar_domain->max_addr = max_addr;
3859 /* Round up size to next multiple of PAGE_SIZE, if it and
3860 the low bits of hpa would take us onto the next page */
3861 size = aligned_nrpages(hpa, size);
3862 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3863 hpa >> VTD_PAGE_SHIFT, size, prot);
3867 static int intel_iommu_unmap(struct iommu_domain *domain,
3868 unsigned long iova, int gfp_order)
3870 struct dmar_domain *dmar_domain = domain->priv;
3871 size_t size = PAGE_SIZE << gfp_order;
3873 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3874 (iova + size - 1) >> VTD_PAGE_SHIFT);
3876 if (dmar_domain->max_addr == iova + size)
3877 dmar_domain->max_addr = iova;
3882 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3885 struct dmar_domain *dmar_domain = domain->priv;
3886 struct dma_pte *pte;
3889 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
3891 phys = dma_pte_addr(pte);
3896 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3899 struct dmar_domain *dmar_domain = domain->priv;
3901 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3902 return dmar_domain->iommu_snooping;
3903 if (cap == IOMMU_CAP_INTR_REMAP)
3904 return intr_remapping_enabled;
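/*
 * Illustrative sketch, not part of this driver: how a consumer such as
 * device assignment drives the generic IOMMU API implemented by the
 * intel_iommu_ops table below.  This assumes the same-era API, i.e.
 * iommu_domain_alloc() taking no argument and the order-based
 * iommu_map()/iommu_unmap(); the function, device and addresses are
 * hypothetical, and both iova and paddr must be aligned to the mapped size.
 */
#if 0	/* example only, never compiled */
static int example_assign_device(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc();		/* -> intel_iommu_domain_init() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* -> intel_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* map one 4KiB page (gfp_order 0) of host memory at iova 0x1000 */
	ret = iommu_map(domain, 0x1000, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... the device can now DMA to iova 0x1000 ... */

	iommu_unmap(domain, 0x1000, 0);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}
#endif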
3909 static struct iommu_ops intel_iommu_ops = {
3910 .domain_init = intel_iommu_domain_init,
3911 .domain_destroy = intel_iommu_domain_destroy,
3912 .attach_dev = intel_iommu_attach_device,
3913 .detach_dev = intel_iommu_detach_device,
3914 .map = intel_iommu_map,
3915 .unmap = intel_iommu_unmap,
3916 .iova_to_phys = intel_iommu_iova_to_phys,
3917 .domain_has_cap = intel_iommu_domain_has_cap,
3920 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3923 * Mobile 4 Series Chipset neglects to set RWBF capability, but needs it.
3926 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3929 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
3930 if (dev->revision == 0x07) {
3931 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
3936 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
3939 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
3940 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
3941 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
3942 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
3943 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
3944 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
3945 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
3946 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
3948 static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
3952 if (pci_read_config_word(dev, GGC, &ggc))
3955 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
3956 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
3958 } else if (dmar_map_gfx) {
3959 /* we have to ensure the gfx device is idle before we flush */
3960 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
3961 intel_iommu_strict = 1;
3964 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
3965 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
3966 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
3967 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
3969 /* On Tylersburg chipsets, some BIOSes have been known to enable the
3970 ISOCH DMAR unit for the Azalia sound device, but not give it any
3971 TLB entries, which causes it to deadlock. Check for that. We do
3972 this in a function called from init_dmars(), instead of in a PCI
3973 quirk, because we don't want to print the obnoxious "BIOS broken"
3974 message if VT-d is actually disabled.
3976 static void __init check_tylersburg_isoch(void)
3978 struct pci_dev *pdev;
3979 uint32_t vtisochctrl;
3981 /* If there's no Azalia in the system anyway, forget it. */
3982 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
3987 /* System Management Registers. Might be hidden, in which case
3988 we can't do the sanity check. But that's OK, because the
3989 known-broken BIOSes _don't_ actually hide it, so far. */
3990 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
3994 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4001 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4002 if (vtisochctrl & 1)
4005 /* Drop all bits other than the number of TLB entries */
4006 vtisochctrl &= 0x1c;
4008 /* If we have the recommended number of TLB entries (16), fine. */
4009 if (vtisochctrl == 0x10)
4012 /* Zero TLB entries? The BIOS is clearly broken; warn and work around it. */
4014 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4015 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4016 dmi_get_system_info(DMI_BIOS_VENDOR),
4017 dmi_get_system_info(DMI_BIOS_VERSION),
4018 dmi_get_system_info(DMI_PRODUCT_VERSION));
4019 iommu_identity_mapping |= IDENTMAP_AZALIA;
4023 printk(KERN_WARNING "DMAR: Recommended number of TLB entries for the ISOCH unit is 16; your BIOS set %d\n",