/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <linux/numa.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "intel-pasid.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

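/*
 * Worked example (illustration only, not from the original source):
 * with gaw == 48, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1, i.e. the
 * last of 2^36 4KiB pages, and DOMAIN_MAX_ADDR(48) is that PFN shifted
 * back up by VTD_PAGE_SHIFT, just under 256TiB. On 64-bit builds
 * DOMAIN_MAX_PFN() passes the value through unchanged; on 32-bit it
 * clamps to ULONG_MAX so PFNs still fit in an unsigned long.
 */
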
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

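/*
 * For illustration (our reading, not in the original comment): ~0xFFFUL
 * sets every bit from 12 upwards, so 4KiB (1 << 12), 8KiB, 2MiB
 * (1 << 21), 1GiB (1 << 30) and every other order of 4KiB is
 * advertised, even though the hardware itself only walks 4KiB, 2MiB
 * and 1GiB leaves.
 */
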
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

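/*
 * Sketch of the conversions above: on x86, PAGE_SHIFT == VTD_PAGE_SHIFT
 * == 12, so both helpers shift by 0 and are identity functions. On a
 * hypothetical arch with 64KiB MM pages (PAGE_SHIFT == 16), one MM PFN
 * would span 16 DMA PFNs: mm_to_dma_pfn(1) == 1 << (16 - 12) == 16.
 */
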
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if we can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
int intel_iommu_tboot_noforce;
static int no_platform_optin;

#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

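/*
 * Summarizing the accessors above, the legacy (non-scalable) context
 * entry layout they encode is:
 *
 *	lo: bit 0	present
 *	    bit 1	fault processing disable
 *	    bits 3:2	translation type
 *	    bits 63:12	address root (second-level page table)
 *	hi: bits 2:0	address width (AGAW)
 *	    bits 23:8	domain id
 */
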
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY		BIT(0)

/*
 * This is a DMA domain allocated through the iommu domain allocation
 * interface. But one or more devices belonging to this domain have
 * been chosen to use a private domain. We should avoid using the
 * map/unmap/iova_to_phys APIs on it.
 */
#define DOMAIN_FLAG_LOSE_CHILDREN		BIT(1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
	struct iommu_resv_region *resv;	/* reserved region handle */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);
static bool device_is_rmrr_locked(struct device *dev);
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

/*
 * Iterate over elements in device_domain_list and call the specified
 * callback @fn against each element.
 */
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
				     void *data), void *data)
{
	int ret = 0;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &device_domain_list, global) {
		ret = fn(info, data);
		if (ret) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			return ret;
		}
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

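/*
 * Minimal usage sketch for the iterator above; count_dev_cb is a
 * hypothetical callback, not part of this file. A non-zero return from
 * the callback stops the walk early.
 *
 *	static int count_dev_cb(struct device_domain_info *info, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	for_each_device_domain(count_dev_cb, &count);
 */
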
const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert generic 'struct iommu_domain' to private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			no_platform_optin = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "sm_on", 5)) {
			pr_info("Intel-IOMMU: scalable mode supported\n");
			intel_iommu_sm = 1;
		} else if (!strncmp(str, "tboot_noforce", 13)) {
			printk(KERN_INFO
				"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
			intel_iommu_tboot_noforce = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

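/*
 * Example command lines accepted by the parser above; options after
 * "intel_iommu=" are comma-separated and may be combined:
 *
 *	intel_iommu=on
 *	intel_iommu=on,sm_on
 *	intel_iommu=on,strict,sp_off
 */
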
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}

void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw, and
 * get a supported lesser agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

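/*
 * Worked example (illustration only): for the default 57-bit width,
 * width_to_agaw(57) == DIV_ROUND_UP(57 - 30, 9) == 3, which is chosen
 * when bit 3 is set in the unit's SAGAW capability (5-level tables);
 * a unit that only supports 48-bit/4-level translation reports bit 2
 * instead, and the downward scan in __iommu_calculate_agaw() settles
 * on agaw == 2.
 */
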
/* This function only returns a single iommu in a domain */
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
		return NULL;

	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (sm_supported(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pf_pdev;

		pdev = to_pci_dev(dev);

		/* VMD child devices currently cannot be handled individually */
		if (is_vmd(pdev->bus))
			return NULL;

		/* VFs aren't listed in scope tables; we need to look up
		 * the PF instead to find the IOMMU. */
		pf_pdev = pci_physfn(pdev);
		dev = &pf_pdev->dev;
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				/* For a VF use its original BDF# not that of the PF
				 * which we used for the IOMMU lookup. Strictly speaking
				 * we could do this for all PCI devices; we only need to
				 * get the BDF# from the scope table for ACPI matches. */
				if (pdev && pdev->is_virtfn)
					goto got_pdev;

				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!sm_supported(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       int retain_level, struct dma_pte *pte,
			       unsigned long pfn, unsigned long start_pfn,
			       unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2) {
			dma_pte_free_level(domain, level - 1, retain_level,
					   level_pte, level_pfn, start_pfn,
					   last_pfn);
		}

		/*
		 * Free the page table if we're below the level we want to
		 * retain and the range covers the entire table.
		 */
		if (level < retain_level && !(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/*
 * clear last level (leaf) ptes and free page table pages below the
 * level we wish to keep intact.
 */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn,
				   int retain_level)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
static struct page *domain_unmap(struct dmar_domain *domain,
				 unsigned long start_pfn,
				 unsigned long last_pfn)
{
	struct page *freelist;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

static void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

static void iova_entry_free(unsigned long data)
{
	struct page *freelist = (struct page *)data;

	dma_free_pagelist(freelist);
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("Allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (sm_supported(iommu))
		addr |= DMA_RTADDR_SMT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		pr_err("Flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("TLB flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
			u8 bus, u8 devfn)
{
	struct device_domain_info *info;

	assert_spin_locked(&device_domain_lock);

	if (!iommu->qi)
		return NULL;

	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			if (info->ats_supported && info->dev)
				return info;
			break;
		}

	return NULL;
}

static void domain_update_iotlb(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	bool has_iotlb_device = false;

	assert_spin_locked(&device_domain_lock);

	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;

		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (pdev->ats_enabled) {
			has_iotlb_device = true;
			break;
		}
	}

	domain->has_iotlb_device = has_iotlb_device;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	struct pci_dev *pdev;

	assert_spin_locked(&device_domain_lock);

	if (!info || !dev_is_pci(info->dev))
		return;

	pdev = to_pci_dev(info->dev);
	/* For IOMMU that supports device IOTLB throttling (DIT), we assign
	 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
	 * queue depth at PF level. If DIT is not set, PFSID will be treated as
	 * reserved, which should be set to 0.
	 */
	if (!ecap_dit(info->iommu->ecap))
		info->pfsid = 0;
	else {
		struct pci_dev *pf_pdev;

		/* pdev will be returned if device is not a VF */
		pf_pdev = pci_physfn(pdev);
		info->pfsid = pci_dev_id(pf_pdev);
	}

#ifdef CONFIG_INTEL_IOMMU_SVM
	/* The PCIe spec, in its wisdom, declares that the behaviour of
	   the device if you enable PASID support after ATS support is
	   undefined. So always enable PASID support on devices which
	   have it, even if we can't yet know if we're ever going to
	   use it. */
	if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
		info->pasid_enabled = 1;

	if (info->pri_supported &&
	    (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
	    !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
		info->pri_enabled = 1;
#endif
	if (!pdev->untrusted && info->ats_supported &&
	    pci_ats_page_aligned(pdev) &&
	    !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
		info->ats_enabled = 1;
		domain_update_iotlb(info->domain);
		info->ats_qdep = pci_ats_queue_depth(pdev);
	}
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	struct pci_dev *pdev;

	assert_spin_locked(&device_domain_lock);

	if (!dev_is_pci(info->dev))
		return;

	pdev = to_pci_dev(info->dev);

	if (info->ats_enabled) {
		pci_disable_ats(pdev);
		info->ats_enabled = 0;
		domain_update_iotlb(info->domain);
	}
#ifdef CONFIG_INTEL_IOMMU_SVM
	if (info->pri_enabled) {
		pci_disable_pri(pdev);
		info->pri_enabled = 0;
	}
	if (info->pasid_enabled) {
		pci_disable_pasid(pdev);
		info->pasid_enabled = 0;
	}
#endif
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	if (!domain->has_iotlb_device)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->ats_enabled)
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = info->ats_qdep;
		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
				qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
				  struct dmar_domain *domain,
				  unsigned long pfn, unsigned int pages,
				  int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
	u16 did = domain->iommu_did[iommu->seq_id];

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(domain, addr, mask);
}

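/*
 * Example of the mask computation above: a 9-page flush rounds up to
 * __roundup_pow_of_two(9) == 16, so mask == ilog2(16) == 4 and the
 * hardware invalidates the naturally aligned 16-page (64KiB) region
 * containing addr.
 */
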
/* Notification for newly created mappings */
static inline void __mapping_notify_one(struct intel_iommu *iommu,
					struct dmar_domain *domain,
					unsigned long pfn, unsigned int pages)
{
	/* It's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
	else
		iommu_flush_write_buffer(iommu);
}

static void iommu_flush_iova(struct iova_domain *iovad)
{
	struct dmar_domain *domain;
	int idx;

	domain = container_of(iovad, struct dmar_domain, iovad);

	for_each_domain_iommu(idx, domain) {
		struct intel_iommu *iommu = g_iommus[idx];
		u16 did = domain->iommu_did[iommu->seq_id];

		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

		if (!cap_caching_mode(iommu->cap))
			iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
					      0, MAX_AGAW_PFN_WIDTH);
	}
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	u32 ndomains, nlongs;
	size_t size;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("%s: Number of Domains supported <%d>\n",
		 iommu->name, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("%s: Allocating domain id array failed\n",
		       iommu->name);
		return -ENOMEM;
	}

	size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
	iommu->domains = kzalloc(size, GFP_KERNEL);

	if (iommu->domains) {
		size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[0] = kzalloc(size, GFP_KERNEL);
	}

	if (!iommu->domains || !iommu->domains[0]) {
		pr_err("%s: Allocating domain array failed\n",
		       iommu->name);
		kfree(iommu->domain_ids);
		kfree(iommu->domains);
		iommu->domain_ids = NULL;
		iommu->domains    = NULL;
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain-id 0, hence we need to pre-allocate it. We also
	 * use domain-id 0 as a marker for non-allocated domain-id, so
	 * make sure it is not used for a real domain.
	 */
	set_bit(0, iommu->domain_ids);

	/*
	 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
	 * entry for first-level or pass-through translation modes should
	 * be programmed with a domain id different from those used for
	 * second-level or nested translation. We reserve a domain id for
	 * this purpose.
	 */
	if (sm_supported(iommu))
		set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);

	return 0;
}

static void disable_dmar_iommu(struct intel_iommu *iommu)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	if (!iommu->domains || !iommu->domain_ids)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
		struct dmar_domain *domain;

		if (info->iommu != iommu)
			continue;

		if (!info->dev || !info->domain)
			continue;

		domain = info->domain;

		__dmar_remove_one_dev_info(info);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
		int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
		int i;

		for (i = 0; i < elems; i++)
			kfree(iommu->domains[i]);
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);

#ifdef CONFIG_INTEL_IOMMU_SVM
	if (pasid_supported(iommu)) {
		if (ecap_prs(iommu->ecap))
			intel_svm_finish_prq(iommu);
	}
#endif
}

static struct dmar_domain *alloc_domain(int flags)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = NUMA_NO_NODE;
	domain->flags = flags;
	domain->has_iotlb_device = false;
	INIT_LIST_HEAD(&domain->devices);

	return domain;
}

/* Must be called with iommu->lock */
static int domain_attach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long ndomains;
	int num;

	assert_spin_locked(&device_domain_lock);
	assert_spin_locked(&iommu->lock);

	domain->iommu_refcnt[iommu->seq_id] += 1;
	domain->iommu_count += 1;
	if (domain->iommu_refcnt[iommu->seq_id] == 1) {
		ndomains = cap_ndoms(iommu->cap);
		num      = find_first_zero_bit(iommu->domain_ids, ndomains);

		if (num >= ndomains) {
			pr_err("%s: No free domain ids\n", iommu->name);
			domain->iommu_refcnt[iommu->seq_id] -= 1;
			domain->iommu_count -= 1;
			return -ENOSPC;
		}

		set_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, domain);

		domain->iommu_did[iommu->seq_id] = num;
		domain->nid			 = iommu->node;

		domain_update_iommu_cap(domain);
	}

	return 0;
}

static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num, count;

	assert_spin_locked(&device_domain_lock);
	assert_spin_locked(&iommu->lock);

	domain->iommu_refcnt[iommu->seq_id] -= 1;
	count = --domain->iommu_count;
	if (domain->iommu_refcnt[iommu->seq_id] == 0) {
		num = domain->iommu_did[iommu->seq_id];
		clear_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, NULL);

		domain_update_iommu_cap(domain);
		domain->iommu_did[iommu->seq_id] = 0;
	}

	return count;
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		pr_err("Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				pci_err(pdev, "Reserve iova for %pR failed\n", r);
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

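/*
 * Worked example (illustration only): gaw == 48 gives r == 0 and is
 * returned unchanged, while gaw == 50 gives r == 2 and is rounded up
 * to the next AGAW-compatible width, 50 + 9 - 2 == 57.
 */
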
static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
		       int guest_width)
{
	int adjust_width, agaw;
	unsigned long sagaw;
	int err;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);

	err = init_iova_flush_queue(&domain->iovad,
				    iommu_flush_iova, iova_entry_free);
	if (err)
		return err;

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("Hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct page *freelist;

	/* Remove associated devices and clear attached or cached domains */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}

/*
 * Get the PASID directory size for scalable mode context entry.
 * Value of X in the PDTS field of a scalable mode context entry
 * indicates PASID directory with 2^(X + 7) entries.
 */
static inline unsigned long context_get_sm_pds(struct pasid_table *table)
{
	int pds, max_pde;

	max_pde = table->max_pasid >> PASID_PDE_SHIFT;
	pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
	if (pds < 7)
		return 0;

	return pds - 7;
}

/*
 * Set the RID_PASID field of a scalable mode context entry. The
 * IOMMU hardware will use the PASID value set in this field for
 * DMA translations of DMA requests without PASID.
 */
static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
	context->hi |= pasid & ((1 << 20) - 1);
	context->hi |= (1 << 20);
}

/*
 * Set the DTE(Device-TLB Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_dte(struct context_entry *context)
{
	context->lo |= (1 << 2);
}

/*
 * Set the PRE(Page Request Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_pre(struct context_entry *context)
{
	context->lo |= (1 << 4);
}

/* Convert value to context PASID directory size field coding. */
#define context_pdts(pds)	(((pds) & 0x7) << 9)

static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      struct pasid_table *table,
				      u8 bus, u8 devfn)
{
	u16 did = domain->iommu_did[iommu->seq_id];
	int translation = CONTEXT_TT_MULTI_LEVEL;
	struct device_domain_info *info = NULL;
	struct context_entry *context;
	unsigned long flags;
	int ret;

	WARN_ON(did == 0);

	if (hw_pass_through && domain_type_is_si(domain))
		translation = CONTEXT_TT_PASS_THROUGH;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -ENOMEM;
	context = iommu_context_addr(iommu, bus, devfn, 1);
	if (!context)
		goto out_unlock;

	ret = 0;
	if (context_present(context))
		goto out_unlock;

	/*
	 * For kdump cases, old valid entries may be cached due to the
	 * in-flight DMA and copied pgtable, but there is no unmapping
	 * behaviour for them, thus we need an explicit cache flush for
	 * the newly-mapped device. For kdump, at this point, the device
	 * is supposed to finish reset at its driver probe stage, so no
	 * in-flight DMA will exist, and we don't need to worry anymore
	 * hereafter.
	 */
	if (context_copied(context)) {
		u16 did_old = context_domain_id(context);

		if (did_old < cap_ndoms(iommu->cap)) {
			iommu->flush.flush_context(iommu, did_old,
						   (((u16)bus) << 8) | devfn,
						   DMA_CCMD_MASK_NOBIT,
						   DMA_CCMD_DEVICE_INVL);
			iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
						 DMA_TLB_DSI_FLUSH);
		}
	}

	context_clear_entry(context);

	if (sm_supported(iommu)) {
		unsigned long pds;

		WARN_ON(!table);

		/* Setup the PASID DIR pointer: */
		pds = context_get_sm_pds(table);
		context->lo = (u64)virt_to_phys(table->table) |
				context_pdts(pds);

		/* Setup the RID_PASID field: */
		context_set_sm_rid2pasid(context, PASID_RID2PASID);

		/*
		 * Setup the Device-TLB enable bit and Page request
		 * Enable bit:
		 */
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		if (info && info->ats_supported)
			context_set_sm_dte(context);
		if (info && info->pri_supported)
			context_set_sm_pre(context);
	} else {
		struct dma_pte *pgd = domain->pgd;
		int agaw;

		context_set_domain_id(context, did);

		if (translation != CONTEXT_TT_PASS_THROUGH) {
			/*
			 * Skip top levels of page tables for iommu which has
			 * less agaw than default. Unnecessary for PT mode.
			 */
			for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
				ret = -ENOMEM;
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd))
					goto out_unlock;
			}

			info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
			if (info && info->ats_supported)
				translation = CONTEXT_TT_DEV_IOTLB;
			else
				translation = CONTEXT_TT_MULTI_LEVEL;

			context_set_address_root(context, virt_to_phys(pgd));
			context_set_address_width(context, agaw);
		} else {
			/*
			 * In pass through mode, AW must be programmed to
			 * indicate the largest AGAW value supported by
			 * hardware. And ASR is ignored by hardware.
			 */
			context_set_address_width(context, iommu->msagaw);
		}

		context_set_translation_type(context, translation);
	}

	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entry we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);

	ret = 0;

out_unlock:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct pasid_table *table;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  data->table, PCI_BUS_NUM(alias),
					  alias & 0xff);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
	struct domain_context_mapping_data data;
	struct pasid_table *table;
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	table = intel_pasid_get_table(dev);

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, table,
						  bus, devfn);

	data.domain = domain;
	data.iommu = iommu;
	data.table = table;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}

static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);

	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

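/*
 * Example: host_addr == 0x1234 and size == 0x2000 leaves an offset of
 * 0x234 within the first page, and PAGE_ALIGN(0x234 + 0x2000) >>
 * VTD_PAGE_SHIFT == 0x3000 >> 12 == 3, since the buffer straddles
 * three 4KiB pages.
 */
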
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

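/*
 * Example (illustration only, assuming 2MiB superpage support): with
 * iov_pfn == 0x200, phy_pfn == 0x80200 and pages == 512, both PFNs
 * have their low nine bits clear and the count covers a full 2MiB
 * stride, so the loop returns level 2. A low bit set in either PFN,
 * or a shorter run, would keep the mapping at level 1 (4KiB).
 */
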
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (!sg) {
		sg_res = nr_pages;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			unsigned int pgoff = sg->offset & ~PAGE_MASK;

			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
			sg->dma_length = sg->length;
			pteval = (sg_phys(sg) - pgoff) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				unsigned long nr_superpages, end_pfn;

				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);

				nr_superpages = sg_res / lvl_pages;
				end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;

				/*
				 * Ensure that old small page tables are
				 * removed to make room for superpage(s).
				 * We're adding new large pages, so make sure
				 * we don't remove their parent tables.
				 */
				dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
						       largepage_lvl + 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
				iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			  struct scatterlist *sg, unsigned long phys_pfn,
			  unsigned long nr_pages, int prot)
{
	int iommu_id, ret;
	struct intel_iommu *iommu;

	/* Do the real mapping first */
	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
	if (ret)
		return ret;

	for_each_domain_iommu(iommu_id, domain) {
		iommu = g_iommus[iommu_id];
		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
	}

	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

2355 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2357 unsigned long flags;
2358 struct context_entry *context;
2364 spin_lock_irqsave(&iommu->lock, flags);
2365 context = iommu_context_addr(iommu, bus, devfn, 0);
2367 spin_unlock_irqrestore(&iommu->lock, flags);
2370 did_old = context_domain_id(context);
2371 context_clear_entry(context);
2372 __iommu_flush_cache(iommu, context, sizeof(*context));
2373 spin_unlock_irqrestore(&iommu->lock, flags);
2374 iommu->flush.flush_context(iommu,
2376 (((u16)bus) << 8) | devfn,
2377 DMA_CCMD_MASK_NOBIT,
2378 DMA_CCMD_DEVICE_INVL);
2379 iommu->flush.flush_iotlb(iommu,
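/*
 * The context-cache invalidation above is keyed by source-id. Worked
 * example: bus 0x3a, devfn 0x10 (device 2, function 0) yields
 * ((u16)0x3a << 8) | 0x10 == 0x3a10, i.e. the same BDF routing id the
 * device places on the bus.
 */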
2386 static inline void unlink_domain_info(struct device_domain_info *info)
2388 assert_spin_locked(&device_domain_lock);
2389 list_del(&info->link);
2390 list_del(&info->global);
2392 info->dev->archdata.iommu = NULL;
2395 static void domain_remove_dev_info(struct dmar_domain *domain)
2397 struct device_domain_info *info, *tmp;
2398 unsigned long flags;
2400 spin_lock_irqsave(&device_domain_lock, flags);
2401 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2402 __dmar_remove_one_dev_info(info);
2403 spin_unlock_irqrestore(&device_domain_lock, flags);
2408 * Note: we use struct device->archdata.iommu to store the info
2410 static struct dmar_domain *find_domain(struct device *dev)
2412 struct device_domain_info *info;
2414 if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
2415 struct iommu_domain *domain;
2417 dev->archdata.iommu = NULL;
2418 domain = iommu_get_domain_for_dev(dev);
2420 intel_iommu_attach_device(domain, dev);
2423 /* No lock here; we assume no domain exits in the normal case */
2424 info = dev->archdata.iommu;
2427 return info->domain;
2431 static inline struct device_domain_info *
2432 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2434 struct device_domain_info *info;
2436 list_for_each_entry(info, &device_domain_list, global)
2437 if (info->iommu->segment == segment && info->bus == bus &&
2438 info->devfn == devfn)
2444 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2447 struct dmar_domain *domain)
2449 struct dmar_domain *found = NULL;
2450 struct device_domain_info *info;
2451 unsigned long flags;
2454 info = alloc_devinfo_mem();
2459 info->devfn = devfn;
2460 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2461 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2464 info->domain = domain;
2465 info->iommu = iommu;
2466 info->pasid_table = NULL;
2467 info->auxd_enabled = 0;
2468 INIT_LIST_HEAD(&info->auxiliary_domains);
2470 if (dev && dev_is_pci(dev)) {
2471 struct pci_dev *pdev = to_pci_dev(info->dev);
2473 if (!pdev->untrusted &&
2474 !pci_ats_disabled() &&
2475 ecap_dev_iotlb_support(iommu->ecap) &&
2476 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2477 dmar_find_matched_atsr_unit(pdev))
2478 info->ats_supported = 1;
2480 if (sm_supported(iommu)) {
2481 if (pasid_supported(iommu)) {
2482 int features = pci_pasid_features(pdev);
2484 info->pasid_supported = features | 1;
2487 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2488 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2489 info->pri_supported = 1;
2493 spin_lock_irqsave(&device_domain_lock, flags);
2495 found = find_domain(dev);
2498 struct device_domain_info *info2;
2499 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2501 found = info2->domain;
2507 spin_unlock_irqrestore(&device_domain_lock, flags);
2508 free_devinfo_mem(info);
2509 /* Caller must free the original domain */
2513 spin_lock(&iommu->lock);
2514 ret = domain_attach_iommu(domain, iommu);
2515 spin_unlock(&iommu->lock);
2518 spin_unlock_irqrestore(&device_domain_lock, flags);
2519 free_devinfo_mem(info);
2523 list_add(&info->link, &domain->devices);
2524 list_add(&info->global, &device_domain_list);
2526 dev->archdata.iommu = info;
2527 spin_unlock_irqrestore(&device_domain_lock, flags);
2529 /* PASID table is mandatory for a PCI device in scalable mode. */
2530 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2531 ret = intel_pasid_alloc_table(dev);
2533 dev_err(dev, "PASID table allocation failed\n");
2534 dmar_remove_one_dev_info(dev);
2538 /* Setup the PASID entry for requests without PASID: */
2539 spin_lock(&iommu->lock);
2540 if (hw_pass_through && domain_type_is_si(domain))
2541 ret = intel_pasid_setup_pass_through(iommu, domain,
2542 dev, PASID_RID2PASID);
2544 ret = intel_pasid_setup_second_level(iommu, domain,
2545 dev, PASID_RID2PASID);
2546 spin_unlock(&iommu->lock);
2548 dev_err(dev, "Setup RID2PASID failed\n");
2549 dmar_remove_one_dev_info(dev);
2554 if (dev && domain_context_mapping(domain, dev)) {
2555 dev_err(dev, "Domain context map failed\n");
2556 dmar_remove_one_dev_info(dev);
2563 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2565 *(u16 *)opaque = alias;
2569 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2571 struct device_domain_info *info;
2572 struct dmar_domain *domain = NULL;
2573 struct intel_iommu *iommu;
2575 unsigned long flags;
2578 iommu = device_to_iommu(dev, &bus, &devfn);
2582 if (dev_is_pci(dev)) {
2583 struct pci_dev *pdev = to_pci_dev(dev);
2585 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2587 spin_lock_irqsave(&device_domain_lock, flags);
2588 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2589 PCI_BUS_NUM(dma_alias),
2592 iommu = info->iommu;
2593 domain = info->domain;
2595 spin_unlock_irqrestore(&device_domain_lock, flags);
2597 /* DMA alias already has a domain, use it */
2602 /* Allocate and initialize new domain for the device */
2603 domain = alloc_domain(0);
2606 if (domain_init(domain, iommu, gaw)) {
2607 domain_exit(domain);
2615 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2616 struct dmar_domain *domain)
2618 struct intel_iommu *iommu;
2619 struct dmar_domain *tmp;
2620 u16 req_id, dma_alias;
2623 iommu = device_to_iommu(dev, &bus, &devfn);
2627 req_id = ((u16)bus << 8) | devfn;
2629 if (dev_is_pci(dev)) {
2630 struct pci_dev *pdev = to_pci_dev(dev);
2632 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2634 /* register PCI DMA alias device */
2635 if (req_id != dma_alias) {
2636 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2637 dma_alias & 0xff, NULL, domain);
2639 if (!tmp || tmp != domain)
2644 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2645 if (!tmp || tmp != domain)
2651 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2653 struct dmar_domain *domain, *tmp;
2655 domain = find_domain(dev);
2659 domain = find_or_alloc_domain(dev, gaw);
2663 tmp = set_domain_for_dev(dev, domain);
2664 if (!tmp || domain != tmp) {
2665 domain_exit(domain);
2674 static int iommu_domain_identity_map(struct dmar_domain *domain,
2675 unsigned long long start,
2676 unsigned long long end)
2678 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2679 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2681 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2682 dma_to_mm_pfn(last_vpfn))) {
2683 pr_err("Reserving iova failed\n");
2687 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2689 * The RMRR range might overlap with a physical memory range,
2692 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2694 return __domain_mapping(domain, first_vpfn, NULL,
2695 first_vpfn, last_vpfn - first_vpfn + 1,
2696 DMA_PTE_READ|DMA_PTE_WRITE);
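/*
 * Example (sketch): an identity map request for start == 0x7f000000,
 * end == 0x7f00ffff covers first_vpfn == 0x7f000 .. last_vpfn == 0x7f00f,
 * i.e. 16 pages mapped 1:1 (IOVA == physical address) with read/write
 * permission.
 */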
2699 static int domain_prepare_identity_map(struct device *dev,
2700 struct dmar_domain *domain,
2701 unsigned long long start,
2702 unsigned long long end)
2704 /* For _hardware_ passthrough, don't bother. But for software
2705    passthrough, we do it anyway -- it may indicate a memory
2706    range which is reserved in E820 and so didn't get set
2707    up to start with in si_domain */
2708 if (domain == si_domain && hw_pass_through) {
2709 dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
2714 dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
2717 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2718 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2719 dmi_get_system_info(DMI_BIOS_VENDOR),
2720 dmi_get_system_info(DMI_BIOS_VERSION),
2721 dmi_get_system_info(DMI_PRODUCT_VERSION));
2725 if (end >> agaw_to_width(domain->agaw)) {
2726 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2727 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2728 agaw_to_width(domain->agaw),
2729 dmi_get_system_info(DMI_BIOS_VENDOR),
2730 dmi_get_system_info(DMI_BIOS_VERSION),
2731 dmi_get_system_info(DMI_PRODUCT_VERSION));
2735 return iommu_domain_identity_map(domain, start, end);
2738 static int iommu_prepare_identity_map(struct device *dev,
2739 unsigned long long start,
2740 unsigned long long end)
2742 struct dmar_domain *domain;
2745 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2749 ret = domain_prepare_identity_map(dev, domain, start, end);
2751 domain_exit(domain);
2756 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2759 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2761 return iommu_prepare_identity_map(dev, rmrr->base_address,
2765 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2766 static inline void iommu_prepare_isa(void)
2768 struct pci_dev *pdev;
2771 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2775 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2776 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2779 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2784 static inline void iommu_prepare_isa(void)
2788 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2790 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2792 static int __init si_domain_init(int hw)
2794 struct dmar_rmrr_unit *rmrr;
2798 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2802 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2803 domain_exit(si_domain);
2810 for_each_online_node(nid) {
2811 unsigned long start_pfn, end_pfn;
2814 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2815 ret = iommu_domain_identity_map(si_domain,
2816 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2823 * Normally we use DMA domains for devices which have RMRRs. But we
2824 * relax this requirement for graphics and USB devices. Identity map
2825 * the RMRRs for graphics and USB devices so that they can use the
2828 for_each_rmrr_units(rmrr) {
2829 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2831 unsigned long long start = rmrr->base_address;
2832 unsigned long long end = rmrr->end_address;
2834 if (device_is_rmrr_locked(dev))
2837 if (WARN_ON(end < start ||
2838 end >> agaw_to_width(si_domain->agaw)))
2841 ret = iommu_domain_identity_map(si_domain, start, end);
2850 static int identity_mapping(struct device *dev)
2852 struct device_domain_info *info;
2854 info = dev->archdata.iommu;
2855 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2856 return (info->domain == si_domain);
2861 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2863 struct dmar_domain *ndomain;
2864 struct intel_iommu *iommu;
2867 iommu = device_to_iommu(dev, &bus, &devfn);
2871 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2872 if (ndomain != domain)
2878 static bool device_has_rmrr(struct device *dev)
2880 struct dmar_rmrr_unit *rmrr;
2885 for_each_rmrr_units(rmrr) {
2887 * Return TRUE if this RMRR contains the device that
2890 for_each_active_dev_scope(rmrr->devices,
2891 rmrr->devices_cnt, i, tmp)
2902 * There are a couple of cases where we need to restrict the functionality of
2903 * devices associated with RMRRs. The first is when evaluating a device for
2904 * identity mapping because problems exist when devices are moved in and out
2905 * of domains and their respective RMRR information is lost. This means that
2906 * a device with associated RMRRs will never be in a "passthrough" domain.
2907 * The second is use of the device through the IOMMU API. This interface
2908 * expects to have full control of the IOVA space for the device. We cannot
2909 * satisfy both the requirement that RMRR access is maintained and have an
2910 * unencumbered IOVA space. We also have no ability to quiesce the device's
2911 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2912 * We therefore prevent devices associated with an RMRR from participating in
2913 * the IOMMU API, which eliminates them from device assignment.
2915 * In both cases we assume that PCI USB devices with RMRRs have them largely
2916 * for historical reasons and that the RMRR space is not actively used post
2917 * boot. This exclusion may change if vendors begin to abuse it.
2919 * The same exception is made for graphics devices, with the requirement that
2920 * any use of the RMRR regions will be torn down before assigning the device
2923 static bool device_is_rmrr_locked(struct device *dev)
2925 if (!device_has_rmrr(dev))
2928 if (dev_is_pci(dev)) {
2929 struct pci_dev *pdev = to_pci_dev(dev);
2931 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2939 * Return the required default domain type for a specific device.
2941 * @dev: the device in query
2942 * @startup: true if this is during early boot
2945 * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
2946 * - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
2947 * - 0: both identity and dynamic domains work for this device
2949 static int device_def_domain_type(struct device *dev, int startup)
2951 if (dev_is_pci(dev)) {
2952 struct pci_dev *pdev = to_pci_dev(dev);
2954 if (device_is_rmrr_locked(dev))
2955 return IOMMU_DOMAIN_DMA;
2958 * Prevent any device marked as untrusted from getting
2959 * placed into the statically identity mapping domain.
2961 if (pdev->untrusted)
2962 return IOMMU_DOMAIN_DMA;
2964 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2965 return IOMMU_DOMAIN_IDENTITY;
2967 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2968 return IOMMU_DOMAIN_IDENTITY;
2971 * We want to start off with all devices in the 1:1 domain, and
2972 * take them out later if we find they can't access all of memory.
2974 * However, we can't do this for PCI devices behind bridges,
2975 * because all PCI devices behind the same bridge will end up
2976 * with the same source-id on their transactions.
2978 * Practically speaking, we can't change things around for these
2979 * devices at run-time, because we can't be sure there'll be no
2980 * DMA transactions in flight for any of their siblings.
2982 * So PCI devices (unless they're on the root bus) as well as
2983 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2984 * the 1:1 domain, just in _case_ one of their siblings turns out
2985 * not to be able to map all of memory.
2987 if (!pci_is_pcie(pdev)) {
2988 if (!pci_is_root_bus(pdev->bus))
2989 return IOMMU_DOMAIN_DMA;
2990 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2991 return IOMMU_DOMAIN_DMA;
2992 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2993 return IOMMU_DOMAIN_DMA;
2995 if (device_has_rmrr(dev))
2996 return IOMMU_DOMAIN_DMA;
2999 return (iommu_identity_mapping & IDENTMAP_ALL) ?
3000 IOMMU_DOMAIN_IDENTITY : 0;
3003 static inline int iommu_should_identity_map(struct device *dev, int startup)
3005 return device_def_domain_type(dev, startup) == IOMMU_DOMAIN_IDENTITY;
3008 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
3012 if (!iommu_should_identity_map(dev, 1))
3015 ret = domain_add_dev_info(si_domain, dev);
3017 dev_info(dev, "%s identity mapping\n",
3018 hw ? "Hardware" : "Software");
3019 else if (ret == -ENODEV)
3020 /* device not associated with an iommu */
3027 static int __init iommu_prepare_static_identity_mapping(int hw)
3029 struct pci_dev *pdev = NULL;
3030 struct dmar_drhd_unit *drhd;
3031 struct intel_iommu *iommu;
3036 for_each_pci_dev(pdev) {
3037 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
3042 for_each_active_iommu(iommu, drhd)
3043 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
3044 struct acpi_device_physical_node *pn;
3045 struct acpi_device *adev;
3047 if (dev->bus != &acpi_bus_type)
3050 adev = to_acpi_device(dev);
3051 mutex_lock(&adev->physical_node_lock);
3052 list_for_each_entry(pn, &adev->physical_node_list, node) {
3053 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
3057 mutex_unlock(&adev->physical_node_lock);
3065 static void intel_iommu_init_qi(struct intel_iommu *iommu)
3068 * Start from a sane IOMMU hardware state.
3069 * If queued invalidation was already initialized by us
3070 * (for example, while enabling interrupt remapping) then
3071 * things are already rolling from a sane state.
3075 * Clear any previous faults.
3077 dmar_fault(-1, iommu);
3079 * Disable queued invalidation if supported and already enabled
3080 * before OS handover.
3082 dmar_disable_qi(iommu);
3085 if (dmar_enable_qi(iommu)) {
3087 * Queued Invalidate not enabled, use Register Based Invalidate
3089 iommu->flush.flush_context = __iommu_flush_context;
3090 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3091 pr_info("%s: Using Register based invalidation\n",
3094 iommu->flush.flush_context = qi_flush_context;
3095 iommu->flush.flush_iotlb = qi_flush_iotlb;
3096 pr_info("%s: Using Queued invalidation\n", iommu->name);
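/*
 * The fallback above keeps invalidation working on hardware without a
 * usable invalidation queue, just more slowly: register-based
 * invalidation serializes through the context-command and IOTLB
 * invalidate registers instead of posting descriptors to the queue.
 */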
3100 static int copy_context_table(struct intel_iommu *iommu,
3101 struct root_entry *old_re,
3102 struct context_entry **tbl,
3105 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3106 struct context_entry *new_ce = NULL, ce;
3107 struct context_entry *old_ce = NULL;
3108 struct root_entry re;
3109 phys_addr_t old_ce_phys;
3111 tbl_idx = ext ? bus * 2 : bus;
3112 memcpy(&re, old_re, sizeof(re));
3114 for (devfn = 0; devfn < 256; devfn++) {
3115 /* First calculate the correct index */
3116 idx = (ext ? devfn * 2 : devfn) % 256;
3119 /* First save what we may have and clean up */
3121 tbl[tbl_idx] = new_ce;
3122 __iommu_flush_cache(iommu, new_ce,
3132 old_ce_phys = root_entry_lctp(&re);
3134 old_ce_phys = root_entry_uctp(&re);
3137 if (ext && devfn == 0) {
3138 /* No LCTP, try UCTP */
3147 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3152 new_ce = alloc_pgtable_page(iommu->node);
3159 /* Now copy the context entry */
3160 memcpy(&ce, old_ce + idx, sizeof(ce));
3162 if (!__context_present(&ce))
3165 did = context_domain_id(&ce);
3166 if (did >= 0 && did < cap_ndoms(iommu->cap))
3167 set_bit(did, iommu->domain_ids);
3170 * We need a marker for copied context entries. This
3171 * marker needs to work for the old format as well as
3172 * for extended context entries.
3174 * Bit 67 of the context entry is used. In the old
3175 * format this bit is available to software, in the
3176 * extended format it is the PGE bit, but PGE is ignored
3177 * by HW if PASIDs are disabled (and thus still
3180 * So disable PASIDs first and then mark the entry
3181 * copied. This means that we don't copy PASID
3182 * translations from the old kernel, but this is fine as
3183 * faults there are not fatal.
3185 context_clear_pasid_enable(&ce);
3186 context_set_copied(&ce);
3191 tbl[tbl_idx + pos] = new_ce;
3193 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
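/*
 * Index math used above, by example: in extended-root mode each bus owns
 * two context tables, so tbl_idx == bus * 2 and the per-devfn index
 * wraps at 256. devfn 0x7f lands at idx (0x7f * 2) % 256 == 0xfe of the
 * lower table (tbl[tbl_idx]); devfn 0x80 at idx 0 of the upper one
 * (tbl[tbl_idx + 1]).
 */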
3202 static int copy_translation_tables(struct intel_iommu *iommu)
3204 struct context_entry **ctxt_tbls;
3205 struct root_entry *old_rt;
3206 phys_addr_t old_rt_phys;
3207 int ctxt_table_entries;
3208 unsigned long flags;
3213 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3214 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3215 new_ext = !!ecap_ecs(iommu->ecap);
3218 * The RTT bit can only be changed when translation is disabled,
3219 * but disabling translation means to open a window for data
3220 * corruption. So bail out and don't copy anything if we would
3221 * have to change the bit.
3226 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3230 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3234 /* This is too big for the stack - allocate it from slab */
3235 ctxt_table_entries = ext ? 512 : 256;
3237 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3241 for (bus = 0; bus < 256; bus++) {
3242 ret = copy_context_table(iommu, &old_rt[bus],
3243 ctxt_tbls, bus, ext);
3245 pr_err("%s: Failed to copy context table for bus %d\n",
3251 spin_lock_irqsave(&iommu->lock, flags);
3253 /* Context tables are copied, now write them to the root_entry table */
3254 for (bus = 0; bus < 256; bus++) {
3255 int idx = ext ? bus * 2 : bus;
3258 if (ctxt_tbls[idx]) {
3259 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3260 iommu->root_entry[bus].lo = val;
3263 if (!ext || !ctxt_tbls[idx + 1])
3266 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3267 iommu->root_entry[bus].hi = val;
3270 spin_unlock_irqrestore(&iommu->lock, flags);
3274 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
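/*
 * Sketch of the root-entry encoding written above: bit 0 is the present
 * bit, so a context table at physical address 0x12345000 is installed as
 *
 *	val = virt_to_phys(tbl) | 1 = 0x12345001
 *
 * with .lo holding the context table (all devfns in legacy mode, devfns
 * 0-127 in extended mode) and .hi, in extended mode, the one for devfns
 * 128-255.
 */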
3284 static int __init init_dmars(void)
3286 struct dmar_drhd_unit *drhd;
3287 struct dmar_rmrr_unit *rmrr;
3288 bool copied_tables = false;
3290 struct intel_iommu *iommu;
3296 * initialize and program root entry to not present
3299 for_each_drhd_unit(drhd) {
3301 * lock not needed as this is only incremented in the single
3302 * threaded kernel __init code path all other access are read
3305 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3309 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3312 /* Preallocate enough resources for IOMMU hot-addition */
3313 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3314 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3316 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3319 pr_err("Allocating global iommu array failed\n");
3324 for_each_active_iommu(iommu, drhd) {
3326 * Find the max pasid size of all IOMMUs in the system.
3327 * We need to ensure the system pasid table is no bigger
3328 * than the smallest supported size.
3330 if (pasid_supported(iommu)) {
3331 u32 temp = 2 << ecap_pss(iommu->ecap);
3333 intel_pasid_max_id = min_t(u32, temp,
3334 intel_pasid_max_id);
3337 g_iommus[iommu->seq_id] = iommu;
3339 intel_iommu_init_qi(iommu);
3341 ret = iommu_init_domains(iommu);
3345 init_translation_status(iommu);
3347 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3348 iommu_disable_translation(iommu);
3349 clear_translation_pre_enabled(iommu);
3350 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3356 * we could share the same root & context tables
3357 * among all IOMMUs. Need to split this later.
3359 ret = iommu_alloc_root_entry(iommu);
3363 if (translation_pre_enabled(iommu)) {
3364 pr_info("Translation already enabled - trying to copy translation structures\n");
3366 ret = copy_translation_tables(iommu);
3369 * We found the IOMMU with translation
3370 * enabled - but failed to copy over the
3371 * old root-entry table. Try to proceed
3372 * by disabling translation now and
3373 * allocating a clean root-entry table.
3374 * This might cause DMAR faults, but
3375 * probably the dump will still succeed.
3377 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3379 iommu_disable_translation(iommu);
3380 clear_translation_pre_enabled(iommu);
3382 pr_info("Copied translation tables from previous kernel for %s\n",
3384 copied_tables = true;
3388 if (!ecap_pass_through(iommu->ecap))
3389 hw_pass_through = 0;
3390 #ifdef CONFIG_INTEL_IOMMU_SVM
3391 if (pasid_supported(iommu))
3392 intel_svm_init(iommu);
3397 * Now that qi is enabled on all iommus, set the root entry and flush
3398 * caches. This is required on some Intel X58 chipsets, otherwise the
3399 * flush_context function will loop forever and the boot hangs.
3401 for_each_active_iommu(iommu, drhd) {
3402 iommu_flush_write_buffer(iommu);
3403 iommu_set_root_entry(iommu);
3404 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3405 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3408 if (iommu_pass_through)
3409 iommu_identity_mapping |= IDENTMAP_ALL;
3411 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3416 iommu_identity_mapping |= IDENTMAP_GFX;
3418 check_tylersburg_isoch();
3420 ret = si_domain_init(hw_pass_through);
3426 * If we copied translations from a previous kernel in the kdump
3427 * case, we cannot assign the devices to domains now, as that
3428 * would eliminate the old mappings. So skip this part and defer
3429 * the assignment to device driver initialization time.
3435 * If pass through is not set or not enabled, setup context entries for
3436 * identity mappings for rmrr, gfx, and isa and may fall back to static
3437 * identity mapping if iommu_identity_mapping is set.
3439 if (iommu_identity_mapping) {
3440 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3442 pr_crit("Failed to setup IOMMU pass-through\n");
3448 * for each dev attached to rmrr
3450 * locate drhd for dev, alloc domain for dev
3451 * allocate free domain
3452 * allocate page table entries for rmrr
3453 * if context not allocated for bus
3454 * allocate and init context
3455 * set present in root table for this bus
3456 * init context with domain, translation etc
3460 pr_info("Setting RMRR:\n");
3461 for_each_rmrr_units(rmrr) {
3462 /* Some BIOSes list non-existent devices in the DMAR table. */
3463 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3465 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3467 pr_err("Mapping reserved region failed\n");
3471 iommu_prepare_isa();
3478 * global invalidate context cache
3479 * global invalidate iotlb
3480 * enable translation
3482 for_each_iommu(iommu, drhd) {
3483 if (drhd->ignored) {
3485 * we always have to disable PMRs or DMA may fail on
3489 iommu_disable_protect_mem_regions(iommu);
3493 iommu_flush_write_buffer(iommu);
3495 #ifdef CONFIG_INTEL_IOMMU_SVM
3496 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3498 * Call dmar_alloc_hwirq() with dmar_global_lock held,
3499 * could cause possible lock race condition.
3501 up_write(&dmar_global_lock);
3502 ret = intel_svm_enable_prq(iommu);
3503 down_write(&dmar_global_lock);
3508 ret = dmar_set_interrupt(iommu);
3516 for_each_active_iommu(iommu, drhd) {
3517 disable_dmar_iommu(iommu);
3518 free_dmar_iommu(iommu);
3527 /* This takes a number of _MM_ pages, not VTD pages */
3528 static unsigned long intel_alloc_iova(struct device *dev,
3529 struct dmar_domain *domain,
3530 unsigned long nrpages, uint64_t dma_mask)
3532 unsigned long iova_pfn;
3534 /* Restrict dma_mask to the width that the iommu can handle */
3535 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3536 /* Ensure we reserve the whole size-aligned region */
3537 nrpages = __roundup_pow_of_two(nrpages);
3539 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3541 * First try to allocate an io virtual address in
3542 * DMA_BIT_MASK(32) and if that fails then try allocating
3545 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3546 IOVA_PFN(DMA_BIT_MASK(32)), false);
3550 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3551 IOVA_PFN(dma_mask), true);
3552 if (unlikely(!iova_pfn)) {
3553 dev_err(dev, "Allocating %ld-page iova failed", nrpages);
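/*
 * Two conventions matter here, by example: nrpages is in MM pages
 * (PAGE_SHIFT granularity, see the comment above the function) and is
 * rounded up to a power of two so the allocation is size-aligned: a
 * 3-page request reserves 4 pages. A device with a >32-bit mask first
 * tries below 4GiB (IOVA_PFN(DMA_BIT_MASK(32))) and only then retries
 * against the full mask.
 */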
3560 static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
3562 struct dmar_domain *domain, *tmp;
3563 struct dmar_rmrr_unit *rmrr;
3564 struct device *i_dev;
3567 /* Device shouldn't be attached to any domain. */
3568 domain = find_domain(dev);
3572 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3576 /* We have a new domain - setup possible RMRRs for the device */
3578 for_each_rmrr_units(rmrr) {
3579 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3584 ret = domain_prepare_identity_map(dev, domain,
3588 dev_err(dev, "Mapping reserved region failed\n");
3593 tmp = set_domain_for_dev(dev, domain);
3594 if (!tmp || domain != tmp) {
3595 domain_exit(domain);
3601 dev_err(dev, "Allocating domain failed\n");
3606 /* Check if the dev needs to go through the non-identity map and unmap process. */
3607 static bool iommu_need_mapping(struct device *dev)
3611 if (iommu_dummy(dev))
3614 ret = identity_mapping(dev);
3616 u64 dma_mask = *dev->dma_mask;
3618 if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
3619 dma_mask = dev->coherent_dma_mask;
3621 if (dma_mask >= dma_get_required_mask(dev))
3625 * 32 bit DMA is removed from si_domain and we fall back to
3626 * non-identity mapping.
3628 dmar_remove_one_dev_info(dev);
3629 ret = iommu_request_dma_domain_for_dev(dev);
3631 struct iommu_domain *domain;
3632 struct dmar_domain *dmar_domain;
3634 domain = iommu_get_domain_for_dev(dev);
3636 dmar_domain = to_dmar_domain(domain);
3637 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
3639 get_private_domain_for_dev(dev);
3642 dev_info(dev, "32bit DMA uses non-identity mapping\n");
3648 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3649 size_t size, int dir, u64 dma_mask)
3651 struct dmar_domain *domain;
3652 phys_addr_t start_paddr;
3653 unsigned long iova_pfn;
3656 struct intel_iommu *iommu;
3657 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3659 BUG_ON(dir == DMA_NONE);
3661 domain = find_domain(dev);
3663 return DMA_MAPPING_ERROR;
3665 iommu = domain_get_iommu(domain);
3666 size = aligned_nrpages(paddr, size);
3668 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3673 * Check if DMAR supports zero-length reads on write only
3676 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3677 !cap_zlr(iommu->cap))
3678 prot |= DMA_PTE_READ;
3679 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3680 prot |= DMA_PTE_WRITE;
3682 * paddr - (paddr + size) might span a partial page, so we should map the
3683 * whole page. Note: if two parts of one page are separately mapped, we
3684 * might have two guest addresses mapping to the same host paddr, but this
3685 * is not a big problem
3687 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3688 mm_to_dma_pfn(paddr_pfn), size, prot);
3692 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3693 start_paddr += paddr & ~PAGE_MASK;
3698 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3699 dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
3700 size, (unsigned long long)paddr, dir);
3701 return DMA_MAPPING_ERROR;
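/*
 * Example (sketch, assuming 4KiB pages) of the address reconstruction in
 * the success path above: paddr == 0x1234567 mapped at iova_pfn == 0x800
 * returns
 *
 *	start_paddr = (0x800 << PAGE_SHIFT) + (0x1234567 & ~PAGE_MASK)
 *		    = 0x800000 + 0x567 = 0x800567
 *
 * so the caller's sub-page offset survives in the DMA handle.
 */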
3704 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3705 unsigned long offset, size_t size,
3706 enum dma_data_direction dir,
3707 unsigned long attrs)
3709 if (iommu_need_mapping(dev))
3710 return __intel_map_single(dev, page_to_phys(page) + offset,
3711 size, dir, *dev->dma_mask);
3712 return dma_direct_map_page(dev, page, offset, size, dir, attrs);
3715 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
3716 size_t size, enum dma_data_direction dir,
3717 unsigned long attrs)
3719 if (iommu_need_mapping(dev))
3720 return __intel_map_single(dev, phys_addr, size, dir,
3722 return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
3725 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3727 struct dmar_domain *domain;
3728 unsigned long start_pfn, last_pfn;
3729 unsigned long nrpages;
3730 unsigned long iova_pfn;
3731 struct intel_iommu *iommu;
3732 struct page *freelist;
3733 struct pci_dev *pdev = NULL;
3735 domain = find_domain(dev);
3738 iommu = domain_get_iommu(domain);
3740 iova_pfn = IOVA_PFN(dev_addr);
3742 nrpages = aligned_nrpages(dev_addr, size);
3743 start_pfn = mm_to_dma_pfn(iova_pfn);
3744 last_pfn = start_pfn + nrpages - 1;
3746 if (dev_is_pci(dev))
3747 pdev = to_pci_dev(dev);
3749 dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
3751 freelist = domain_unmap(domain, start_pfn, last_pfn);
3753 if (intel_iommu_strict || (pdev && pdev->untrusted)) {
3754 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3755 nrpages, !freelist, 0);
3757 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3758 dma_free_pagelist(freelist);
3760 queue_iova(&domain->iovad, iova_pfn, nrpages,
3761 (unsigned long)freelist);
3763 * queue up the release of the unmap to save roughly 1/6th of the
3764 * CPU time used up by the IOTLB flush operation...
3769 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3770 size_t size, enum dma_data_direction dir,
3771 unsigned long attrs)
3773 if (iommu_need_mapping(dev))
3774 intel_unmap(dev, dev_addr, size);
3776 dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
3779 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
3780 size_t size, enum dma_data_direction dir, unsigned long attrs)
3782 if (iommu_need_mapping(dev))
3783 intel_unmap(dev, dev_addr, size);
3786 static void *intel_alloc_coherent(struct device *dev, size_t size,
3787 dma_addr_t *dma_handle, gfp_t flags,
3788 unsigned long attrs)
3790 struct page *page = NULL;
3793 if (!iommu_need_mapping(dev))
3794 return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
3796 size = PAGE_ALIGN(size);
3797 order = get_order(size);
3799 if (gfpflags_allow_blocking(flags)) {
3800 unsigned int count = size >> PAGE_SHIFT;
3802 page = dma_alloc_from_contiguous(dev, count, order,
3803 flags & __GFP_NOWARN);
3807 page = alloc_pages(flags, order);
3810 memset(page_address(page), 0, size);
3812 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3814 dev->coherent_dma_mask);
3815 if (*dma_handle != DMA_MAPPING_ERROR)
3816 return page_address(page);
3817 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3818 __free_pages(page, order);
3823 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3824 dma_addr_t dma_handle, unsigned long attrs)
3827 struct page *page = virt_to_page(vaddr);
3829 if (!iommu_need_mapping(dev))
3830 return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
3832 size = PAGE_ALIGN(size);
3833 order = get_order(size);
3835 intel_unmap(dev, dma_handle, size);
3836 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3837 __free_pages(page, order);
3840 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3841 int nelems, enum dma_data_direction dir,
3842 unsigned long attrs)
3844 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3845 unsigned long nrpages = 0;
3846 struct scatterlist *sg;
3849 if (!iommu_need_mapping(dev))
3850 return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
3852 for_each_sg(sglist, sg, nelems, i) {
3853 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3856 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3859 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3860 enum dma_data_direction dir, unsigned long attrs)
3863 struct dmar_domain *domain;
3866 unsigned long iova_pfn;
3868 struct scatterlist *sg;
3869 unsigned long start_vpfn;
3870 struct intel_iommu *iommu;
3872 BUG_ON(dir == DMA_NONE);
3873 if (!iommu_need_mapping(dev))
3874 return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
3876 domain = find_domain(dev);
3880 iommu = domain_get_iommu(domain);
3882 for_each_sg(sglist, sg, nelems, i)
3883 size += aligned_nrpages(sg->offset, sg->length);
3885 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3888 sglist->dma_length = 0;
3893 * Check if DMAR supports zero-length reads on write only
3896 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3897 !cap_zlr(iommu->cap))
3898 prot |= DMA_PTE_READ;
3899 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3900 prot |= DMA_PTE_WRITE;
3902 start_vpfn = mm_to_dma_pfn(iova_pfn);
3904 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3905 if (unlikely(ret)) {
3906 dma_pte_free_pagetable(domain, start_vpfn,
3907 start_vpfn + size - 1,
3908 agaw_to_level(domain->agaw) + 1);
3909 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
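/*
 * Note on the size accounting above, by example: aligned_nrpages()
 * counts whole pages touched, so an sg entry with offset == 0x800 and
 * length == 0x1000 spans two 4KiB pages even though it is only one page
 * long. The IOVA allocation therefore covers every page any entry
 * touches, and on failure both the partial page tables and the IOVA are
 * released.
 */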
3916 static const struct dma_map_ops intel_dma_ops = {
3917 .alloc = intel_alloc_coherent,
3918 .free = intel_free_coherent,
3919 .map_sg = intel_map_sg,
3920 .unmap_sg = intel_unmap_sg,
3921 .map_page = intel_map_page,
3922 .unmap_page = intel_unmap_page,
3923 .map_resource = intel_map_resource,
3924 .unmap_resource = intel_unmap_resource,
3925 .dma_supported = dma_direct_supported,
3928 static inline int iommu_domain_cache_init(void)
3932 iommu_domain_cache = kmem_cache_create("iommu_domain",
3933 sizeof(struct dmar_domain),
3938 if (!iommu_domain_cache) {
3939 pr_err("Couldn't create iommu_domain cache\n");
3946 static inline int iommu_devinfo_cache_init(void)
3950 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3951 sizeof(struct device_domain_info),
3955 if (!iommu_devinfo_cache) {
3956 pr_err("Couldn't create devinfo cache\n");
3963 static int __init iommu_init_mempool(void)
3966 ret = iova_cache_get();
3970 ret = iommu_domain_cache_init();
3974 ret = iommu_devinfo_cache_init();
3978 kmem_cache_destroy(iommu_domain_cache);
3985 static void __init iommu_exit_mempool(void)
3987 kmem_cache_destroy(iommu_devinfo_cache);
3988 kmem_cache_destroy(iommu_domain_cache);
3992 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3994 struct dmar_drhd_unit *drhd;
3998 /* We know that this device on this chipset has its own IOMMU.
3999 * If we find it under a different IOMMU, then the BIOS is lying
4000 * to us. Hope that the IOMMU for this device is actually
4001 * disabled, and it needs no translation...
4003 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4005 /* "can't" happen */
4006 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4009 vtbar &= 0xffff0000;
4011 /* we know that this IOMMU should be at offset 0xa000 from vtbar */
4012 drhd = dmar_find_matched_drhd_unit(pdev);
4013 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4014 TAINT_FIRMWARE_WORKAROUND,
4015 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4016 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4018 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4020 static void __init init_no_remapping_devices(void)
4022 struct dmar_drhd_unit *drhd;
4026 for_each_drhd_unit(drhd) {
4027 if (!drhd->include_all) {
4028 for_each_active_dev_scope(drhd->devices,
4029 drhd->devices_cnt, i, dev)
4031 /* ignore DMAR unit if no devices exist */
4032 if (i == drhd->devices_cnt)
4037 for_each_active_drhd_unit(drhd) {
4038 if (drhd->include_all)
4041 for_each_active_dev_scope(drhd->devices,
4042 drhd->devices_cnt, i, dev)
4043 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
4045 if (i < drhd->devices_cnt)
4048 /* This IOMMU has *only* gfx devices. Either bypass it or
4049 set the gfx_mapped flag, as appropriate */
4050 if (!dmar_map_gfx) {
4052 for_each_active_dev_scope(drhd->devices,
4053 drhd->devices_cnt, i, dev)
4054 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4059 #ifdef CONFIG_SUSPEND
4060 static int init_iommu_hw(void)
4062 struct dmar_drhd_unit *drhd;
4063 struct intel_iommu *iommu = NULL;
4065 for_each_active_iommu(iommu, drhd)
4067 dmar_reenable_qi(iommu);
4069 for_each_iommu(iommu, drhd) {
4070 if (drhd->ignored) {
4072 * we always have to disable PMRs or DMA may fail on
4076 iommu_disable_protect_mem_regions(iommu);
4080 iommu_flush_write_buffer(iommu);
4082 iommu_set_root_entry(iommu);
4084 iommu->flush.flush_context(iommu, 0, 0, 0,
4085 DMA_CCMD_GLOBAL_INVL);
4086 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4087 iommu_enable_translation(iommu);
4088 iommu_disable_protect_mem_regions(iommu);
4094 static void iommu_flush_all(void)
4096 struct dmar_drhd_unit *drhd;
4097 struct intel_iommu *iommu;
4099 for_each_active_iommu(iommu, drhd) {
4100 iommu->flush.flush_context(iommu, 0, 0, 0,
4101 DMA_CCMD_GLOBAL_INVL);
4102 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4103 DMA_TLB_GLOBAL_FLUSH);
4107 static int iommu_suspend(void)
4109 struct dmar_drhd_unit *drhd;
4110 struct intel_iommu *iommu = NULL;
4113 for_each_active_iommu(iommu, drhd) {
4114 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
4116 if (!iommu->iommu_state)
4122 for_each_active_iommu(iommu, drhd) {
4123 iommu_disable_translation(iommu);
4125 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4127 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4128 readl(iommu->reg + DMAR_FECTL_REG);
4129 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4130 readl(iommu->reg + DMAR_FEDATA_REG);
4131 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4132 readl(iommu->reg + DMAR_FEADDR_REG);
4133 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4134 readl(iommu->reg + DMAR_FEUADDR_REG);
4136 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4141 for_each_active_iommu(iommu, drhd)
4142 kfree(iommu->iommu_state);
4147 static void iommu_resume(void)
4149 struct dmar_drhd_unit *drhd;
4150 struct intel_iommu *iommu = NULL;
4153 if (init_iommu_hw()) {
4155 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4157 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4161 for_each_active_iommu(iommu, drhd) {
4163 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4165 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4166 iommu->reg + DMAR_FECTL_REG);
4167 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4168 iommu->reg + DMAR_FEDATA_REG);
4169 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4170 iommu->reg + DMAR_FEADDR_REG);
4171 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4172 iommu->reg + DMAR_FEUADDR_REG);
4174 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4177 for_each_active_iommu(iommu, drhd)
4178 kfree(iommu->iommu_state);
4181 static struct syscore_ops iommu_syscore_ops = {
4182 .resume = iommu_resume,
4183 .suspend = iommu_suspend,
4186 static void __init init_iommu_pm_ops(void)
4188 register_syscore_ops(&iommu_syscore_ops);
4192 static inline void init_iommu_pm_ops(void) {}
4193 #endif /* CONFIG_PM */
4196 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4198 struct acpi_dmar_reserved_memory *rmrr;
4199 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
4200 struct dmar_rmrr_unit *rmrru;
4203 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4207 rmrru->hdr = header;
4208 rmrr = (struct acpi_dmar_reserved_memory *)header;
4209 rmrru->base_address = rmrr->base_address;
4210 rmrru->end_address = rmrr->end_address;
4212 length = rmrr->end_address - rmrr->base_address + 1;
4213 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4218 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4219 ((void *)rmrr) + rmrr->header.length,
4220 &rmrru->devices_cnt);
4221 if (rmrru->devices_cnt && rmrru->devices == NULL)
4224 list_add(&rmrru->list, &dmar_rmrr_units);
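/*
 * Note the inclusive range convention above, by example: an RMRR with
 * base_address == 0x7f000000 and end_address == 0x7f00ffff has
 * length == end - base + 1 == 0x10000 bytes, which is the size the
 * reserved region is registered with.
 */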
4235 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4237 struct dmar_atsr_unit *atsru;
4238 struct acpi_dmar_atsr *tmp;
4240 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4241 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4242 if (atsr->segment != tmp->segment)
4244 if (atsr->header.length != tmp->header.length)
4246 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4253 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4255 struct acpi_dmar_atsr *atsr;
4256 struct dmar_atsr_unit *atsru;
4258 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4261 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4262 atsru = dmar_find_atsr(atsr);
4266 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4271 * If memory is allocated from slab by the ACPI _DSM method, we need to
4272 * copy the memory content because the memory buffer will be freed
4275 atsru->hdr = (void *)(atsru + 1);
4276 memcpy(atsru->hdr, hdr, hdr->length);
4277 atsru->include_all = atsr->flags & 0x1;
4278 if (!atsru->include_all) {
4279 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4280 (void *)atsr + atsr->header.length,
4281 &atsru->devices_cnt);
4282 if (atsru->devices_cnt && atsru->devices == NULL) {
4288 list_add_rcu(&atsru->list, &dmar_atsr_units);
4293 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4295 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4299 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4301 struct acpi_dmar_atsr *atsr;
4302 struct dmar_atsr_unit *atsru;
4304 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4305 atsru = dmar_find_atsr(atsr);
4307 list_del_rcu(&atsru->list);
4309 intel_iommu_free_atsr(atsru);
4315 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4319 struct acpi_dmar_atsr *atsr;
4320 struct dmar_atsr_unit *atsru;
4322 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4323 atsru = dmar_find_atsr(atsr);
4327 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4328 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4336 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4339 struct intel_iommu *iommu = dmaru->iommu;
4341 if (g_iommus[iommu->seq_id])
4344 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4345 pr_warn("%s: Doesn't support hardware pass through.\n",
4349 if (!ecap_sc_support(iommu->ecap) &&
4350 domain_update_iommu_snooping(iommu)) {
4351 pr_warn("%s: Doesn't support snooping.\n",
4355 sp = domain_update_iommu_superpage(iommu) - 1;
4356 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4357 pr_warn("%s: Doesn't support large page.\n",
4363 * Disable translation if already enabled prior to OS handover.
4365 if (iommu->gcmd & DMA_GCMD_TE)
4366 iommu_disable_translation(iommu);
4368 g_iommus[iommu->seq_id] = iommu;
4369 ret = iommu_init_domains(iommu);
4371 ret = iommu_alloc_root_entry(iommu);
4375 #ifdef CONFIG_INTEL_IOMMU_SVM
4376 if (pasid_supported(iommu))
4377 intel_svm_init(iommu);
4380 if (dmaru->ignored) {
4382 * we always have to disable PMRs or DMA may fail on this device
4385 iommu_disable_protect_mem_regions(iommu);
4389 intel_iommu_init_qi(iommu);
4390 iommu_flush_write_buffer(iommu);
4392 #ifdef CONFIG_INTEL_IOMMU_SVM
4393 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
4394 ret = intel_svm_enable_prq(iommu);
4399 ret = dmar_set_interrupt(iommu);
4403 iommu_set_root_entry(iommu);
4404 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4405 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4406 iommu_enable_translation(iommu);
4408 iommu_disable_protect_mem_regions(iommu);
4412 disable_dmar_iommu(iommu);
4414 free_dmar_iommu(iommu);
4418 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4421 struct intel_iommu *iommu = dmaru->iommu;
4423 if (!intel_iommu_enabled)
4429 ret = intel_iommu_add(dmaru);
4431 disable_dmar_iommu(iommu);
4432 free_dmar_iommu(iommu);
4438 static void intel_iommu_free_dmars(void)
4440 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4441 struct dmar_atsr_unit *atsru, *atsr_n;
4443 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4444 list_del(&rmrru->list);
4445 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4450 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4451 list_del(&atsru->list);
4452 intel_iommu_free_atsr(atsru);
4456 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4459 struct pci_bus *bus;
4460 struct pci_dev *bridge = NULL;
4462 struct acpi_dmar_atsr *atsr;
4463 struct dmar_atsr_unit *atsru;
4465 dev = pci_physfn(dev);
4466 for (bus = dev->bus; bus; bus = bus->parent) {
4468 /* If it's an integrated device, allow ATS */
4471 /* Connected via non-PCIe: no ATS */
4472 if (!pci_is_pcie(bridge) ||
4473 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4475 /* If we found the root port, look it up in the ATSR */
4476 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4481 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4482 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4483 if (atsr->segment != pci_domain_nr(dev->bus))
4486 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4487 if (tmp == &bridge->dev)
4490 if (atsru->include_all)
4500 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4503 struct dmar_rmrr_unit *rmrru;
4504 struct dmar_atsr_unit *atsru;
4505 struct acpi_dmar_atsr *atsr;
4506 struct acpi_dmar_reserved_memory *rmrr;
4508 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4511 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4512 rmrr = container_of(rmrru->hdr,
4513 struct acpi_dmar_reserved_memory, header);
4514 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4515 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4516 ((void *)rmrr) + rmrr->header.length,
4517 rmrr->segment, rmrru->devices,
4518 rmrru->devices_cnt);
4521 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4522 dmar_remove_dev_scope(info, rmrr->segment,
4523 rmrru->devices, rmrru->devices_cnt);
4527 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4528 if (atsru->include_all)
4531 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4532 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4533 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4534 (void *)atsr + atsr->header.length,
4535 atsr->segment, atsru->devices,
4536 atsru->devices_cnt);
4541 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4542 if (dmar_remove_dev_scope(info, atsr->segment,
4543 atsru->devices, atsru->devices_cnt))
4552 * Here we only respond to the action of a device being unbound from its driver.
4554 * A newly added device is not attached to its DMAR domain here yet. That will
4555 * happen when the device is mapped to an iova.
4557 static int device_notifier(struct notifier_block *nb,
4558 unsigned long action, void *data)
4560 struct device *dev = data;
4561 struct dmar_domain *domain;
4563 if (iommu_dummy(dev))
4566 if (action == BUS_NOTIFY_REMOVED_DEVICE) {
4567 domain = find_domain(dev);
4571 dmar_remove_one_dev_info(dev);
4572 } else if (action == BUS_NOTIFY_ADD_DEVICE) {
4573 if (iommu_should_identity_map(dev, 1))
4574 domain_add_dev_info(si_domain, dev);
4580 static struct notifier_block device_nb = {
4581 .notifier_call = device_notifier,
4584 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4585 unsigned long val, void *v)
4587 struct memory_notify *mhp = v;
4588 unsigned long long start, end;
4589 unsigned long start_vpfn, last_vpfn;
4592 case MEM_GOING_ONLINE:
4593 start = mhp->start_pfn << PAGE_SHIFT;
4594 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4595 if (iommu_domain_identity_map(si_domain, start, end)) {
4596 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4603 case MEM_CANCEL_ONLINE:
4604 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4605 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4606 while (start_vpfn <= last_vpfn) {
4608 struct dmar_drhd_unit *drhd;
4609 struct intel_iommu *iommu;
4610 struct page *freelist;
4612 iova = find_iova(&si_domain->iovad, start_vpfn);
4614 pr_debug("Failed get IOVA for PFN %lx\n",
4619 iova = split_and_remove_iova(&si_domain->iovad, iova,
4620 start_vpfn, last_vpfn);
4622 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4623 start_vpfn, last_vpfn);
4627 freelist = domain_unmap(si_domain, iova->pfn_lo,
4631 for_each_active_iommu(iommu, drhd)
4632 iommu_flush_iotlb_psi(iommu, si_domain,
4633 iova->pfn_lo, iova_size(iova),
4636 dma_free_pagelist(freelist);
4638 start_vpfn = iova->pfn_hi + 1;
4639 free_iova_mem(iova);
4647 static struct notifier_block intel_iommu_memory_nb = {
4648 .notifier_call = intel_iommu_memory_notifier,
4652 static void free_all_cpu_cached_iovas(unsigned int cpu)
4656 for (i = 0; i < g_num_of_iommus; i++) {
4657 struct intel_iommu *iommu = g_iommus[i];
4658 struct dmar_domain *domain;
4664 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4665 domain = get_iommu_domain(iommu, (u16)did);
4669 free_cpu_cached_iovas(cpu, &domain->iovad);
4674 static int intel_iommu_cpu_dead(unsigned int cpu)
4676 free_all_cpu_cached_iovas(cpu);
4680 static void intel_disable_iommus(void)
4682 struct intel_iommu *iommu = NULL;
4683 struct dmar_drhd_unit *drhd;
4685 for_each_iommu(iommu, drhd)
4686 iommu_disable_translation(iommu);
4689 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4691 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4693 return container_of(iommu_dev, struct intel_iommu, iommu);
4696 static ssize_t intel_iommu_show_version(struct device *dev,
4697 struct device_attribute *attr,
4700 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4701 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4702 return sprintf(buf, "%d:%d\n",
4703 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4705 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4707 static ssize_t intel_iommu_show_address(struct device *dev,
4708 struct device_attribute *attr,
4711 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4712 return sprintf(buf, "%llx\n", iommu->reg_phys);
4714 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4716 static ssize_t intel_iommu_show_cap(struct device *dev,
4717 struct device_attribute *attr,
4720 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4721 return sprintf(buf, "%llx\n", iommu->cap);
4723 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4725 static ssize_t intel_iommu_show_ecap(struct device *dev,
4726 struct device_attribute *attr,
4729 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4730 return sprintf(buf, "%llx\n", iommu->ecap);
4732 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4734 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4735 struct device_attribute *attr,
4738 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4739 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4741 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4743 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4744 struct device_attribute *attr,
4747 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4748 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4749 cap_ndoms(iommu->cap)));
4751 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4753 static struct attribute *intel_iommu_attrs[] = {
4754 &dev_attr_version.attr,
4755 &dev_attr_address.attr,
4757 &dev_attr_ecap.attr,
4758 &dev_attr_domains_supported.attr,
4759 &dev_attr_domains_used.attr,
4763 static struct attribute_group intel_iommu_group = {
4764 .name = "intel-iommu",
4765 .attrs = intel_iommu_attrs,
4768 const struct attribute_group *intel_iommu_groups[] = {
4773 static int __init platform_optin_force_iommu(void)
4775 struct pci_dev *pdev = NULL;
4776 bool has_untrusted_dev = false;
4778 if (!dmar_platform_optin() || no_platform_optin)
4781 for_each_pci_dev(pdev) {
4782 if (pdev->untrusted) {
4783 has_untrusted_dev = true;
4788 if (!has_untrusted_dev)
4791 if (no_iommu || dmar_disabled)
4792 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4795 * If Intel-IOMMU is disabled by default, we will apply identity
4796 * map for all devices except those marked as being untrusted.
4799 iommu_identity_mapping |= IDENTMAP_ALL;
4802 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4810 static int __init probe_acpi_namespace_devices(void)
4812 struct dmar_drhd_unit *drhd;
4813 struct intel_iommu *iommu;
4817 for_each_active_iommu(iommu, drhd) {
4818 for_each_active_dev_scope(drhd->devices,
4819 drhd->devices_cnt, i, dev) {
4820 struct acpi_device_physical_node *pn;
4821 struct iommu_group *group;
4822 struct acpi_device *adev;
4824 if (dev->bus != &acpi_bus_type)
4827 adev = to_acpi_device(dev);
4828 mutex_lock(&adev->physical_node_lock);
4829 list_for_each_entry(pn,
4830 &adev->physical_node_list, node) {
4831 group = iommu_group_get(pn->dev);
4833 iommu_group_put(group);
4837 pn->dev->bus->iommu_ops = &intel_iommu_ops;
4838 ret = iommu_probe_device(pn->dev);
4842 mutex_unlock(&adev->physical_node_lock);
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Intel IOMMU is required for a TXT/tboot launch or platform
	 * opt in, so enforce that.
	 */
	force_on = tboot_force_iommu() || platform_optin_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	up_write(&dmar_global_lock);

	/*
	 * The bus notifier takes the dmar_global_lock, so lockdep will
	 * complain later when we register it under the lock.
	 */
	dmar_register_bus_notifier();

	down_write(&dmar_global_lock);

	if (no_iommu || dmar_disabled) {
		/*
		 * We exit the function here to ensure IOMMU's remapping and
		 * mempool aren't setup, which means that the IOMMU's PMRs
		 * won't be disabled via the call to init_dmars(). So disable
		 * it explicitly here. The PMRs were setup by tboot prior to
		 * calling SENTER, but the kernel is expected to reset/tear
		 * down the PMRs.
		 */
		if (intel_iommu_tboot_noforce) {
			for_each_iommu(iommu, drhd)
				iommu_disable_protect_mem_regions(iommu);
		}

		/*
		 * Make sure the IOMMUs are switched off, even when we
		 * boot into a kexec kernel and the previous kernel left
		 * them enabled
		 */
		intel_disable_iommus();
		goto out_free_dmar;
	}

	if (list_empty(&dmar_rmrr_units))
		pr_info("No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		pr_info("No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	if (dmar_map_gfx)
		intel_iommu_gfx_mapped = 1;

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		pr_err("Initialization failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);

#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd) {
		iommu_device_sysfs_add(&iommu->iommu, NULL,
				       intel_iommu_groups,
				       "%s", iommu->name);
		iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
		iommu_device_register(&iommu->iommu);
	}

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);
	cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
			  intel_iommu_cpu_dead);

	if (probe_acpi_namespace_devices())
		pr_warn("ACPI name space devices didn't probe correctly\n");

	/* Finally, we enable the DMA remapping hardware. */
	for_each_iommu(iommu, drhd) {
		if (!translation_pre_enabled(iommu))
			iommu_enable_translation(iommu);

		iommu_disable_protect_mem_regions(iommu);
	}
	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

	intel_iommu_enabled = 1;
	intel_iommu_debugfs_init();

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

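/*
 * For reference, the initialization order above is: mempool, DMAR table
 * parse, device-scope setup, reserved IOVA ranges, init_dmars() (root and
 * context table programming), sysfs and bus registration, and only then
 * per-unit translation enable with the protected memory regions torn down.
 */
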
static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices. If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
}

static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	unsigned long flags;

	assert_spin_locked(&device_domain_lock);

	if (WARN_ON(!info))
		return;

	iommu = info->iommu;
	domain = info->domain;

	if (info->dev) {
		if (dev_is_pci(info->dev) && sm_supported(iommu))
			intel_pasid_tear_down_entry(iommu, info->dev,
					PASID_RID2PASID);

		iommu_disable_dev_iotlb(info);
		domain_context_clear(iommu, info->dev);
		intel_pasid_free_table(info->dev);
	}

	unlink_domain_info(info);

	spin_lock_irqsave(&iommu->lock, flags);
	domain_detach_iommu(domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	/* free the private domain */
	if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
	    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
		domain_exit(info->domain);

	free_devinfo_mem(info);
}

static void dmar_remove_one_dev_info(struct device *dev)
{
	struct device_domain_info *info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	info = dev->archdata.iommu;
	__dmar_remove_one_dev_info(info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

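/*
 * A worked example of the AGAW math above, assuming the usual VT-d encoding
 * in width_to_agaw(): a 48-bit guest width is already 12 + 4*9 bits, so
 * adjust_width stays 48 and agaw becomes 2 (a 4-level table);
 * DEFAULT_DOMAIN_ADDRESS_WIDTH (57) yields agaw 3, a 5-level table.
 */
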
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	switch (type) {
	case IOMMU_DOMAIN_DMA:
	/* fallthrough */
	case IOMMU_DOMAIN_UNMANAGED:
		dmar_domain = alloc_domain(0);
		if (!dmar_domain) {
			pr_err("Can't allocate dmar_domain\n");
			return NULL;
		}
		if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
			pr_err("Domain initialization failed\n");
			domain_exit(dmar_domain);
			return NULL;
		}

		if (type == IOMMU_DOMAIN_DMA &&
		    init_iova_flush_queue(&dmar_domain->iovad,
					  iommu_flush_iova, iova_entry_free)) {
			pr_warn("iova flush queue initialization failed\n");
			intel_iommu_strict = 1;
		}

		domain_update_iommu_cap(dmar_domain);

		domain = &dmar_domain->domain;
		domain->geometry.aperture_start = 0;
		domain->geometry.aperture_end   =
				__DOMAIN_MAX_ADDR(dmar_domain->gaw);
		domain->geometry.force_aperture = true;

		return domain;
	case IOMMU_DOMAIN_IDENTITY:
		return &si_domain->domain;
	default:
		return NULL;
	}

	return NULL;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	if (domain != &si_domain->domain)
		domain_exit(to_dmar_domain(domain));
}

/*
 * Check whether a @domain could be attached to the @dev through the
 * aux-domain attach/detach APIs.
 */
static inline bool
is_aux_domain(struct device *dev, struct iommu_domain *domain)
{
	struct device_domain_info *info = dev->archdata.iommu;

	return info && info->auxd_enabled &&
			domain->type == IOMMU_DOMAIN_UNMANAGED;
}

static void auxiliary_link_device(struct dmar_domain *domain,
				  struct device *dev)
{
	struct device_domain_info *info = dev->archdata.iommu;

	assert_spin_locked(&device_domain_lock);
	if (WARN_ON(!info))
		return;

	domain->auxd_refcnt++;
	list_add(&domain->auxd, &info->auxiliary_domains);
}

static void auxiliary_unlink_device(struct dmar_domain *domain,
				    struct device *dev)
{
	struct device_domain_info *info = dev->archdata.iommu;

	assert_spin_locked(&device_domain_lock);
	if (WARN_ON(!info))
		return;

	list_del(&domain->auxd);
	domain->auxd_refcnt--;

	if (!domain->auxd_refcnt && domain->default_pasid > 0)
		intel_pasid_free_id(domain->default_pasid);
}

static int aux_domain_add_dev(struct dmar_domain *domain,
			      struct device *dev)
{
	int ret;
	u8 bus, devfn;
	unsigned long flags;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (domain->default_pasid <= 0) {
		int pasid;

		pasid = intel_pasid_alloc_id(domain, PASID_MIN,
					     pci_max_pasids(to_pci_dev(dev)),
					     GFP_KERNEL);
		if (pasid <= 0) {
			pr_err("Can't allocate default pasid\n");
			return -ENODEV;
		}
		domain->default_pasid = pasid;
	}

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * iommu->lock must be held to attach domain to iommu and setup the
	 * pasid entry for second level translation.
	 */
	spin_lock(&iommu->lock);
	ret = domain_attach_iommu(domain, iommu);
	if (ret)
		goto attach_failed;

	/* Setup the PASID entry for mediated devices: */
	ret = intel_pasid_setup_second_level(iommu, domain, dev,
					     domain->default_pasid);
	if (ret)
		goto table_failed;
	spin_unlock(&iommu->lock);

	auxiliary_link_device(domain, dev);

	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;

table_failed:
	domain_detach_iommu(domain, iommu);
attach_failed:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);
	if (!domain->auxd_refcnt && domain->default_pasid > 0)
		intel_pasid_free_id(domain->default_pasid);

	return ret;
}

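/*
 * Lock order in aux_domain_add_dev() above and aux_domain_remove_dev()
 * below: device_domain_lock is taken first and iommu->lock nests inside it;
 * the error path drops them in the reverse order it took them.
 */
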
static void aux_domain_remove_dev(struct dmar_domain *domain,
				  struct device *dev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;

	if (!is_aux_domain(dev, &domain->domain))
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	info = dev->archdata.iommu;
	iommu = info->iommu;

	auxiliary_unlink_device(domain, dev);

	spin_lock(&iommu->lock);
	intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
	domain_detach_iommu(domain, iommu);
	spin_unlock(&iommu->lock);

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static int prepare_domain_attach_device(struct iommu_domain *domain,
					struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		dev_err(dev, "%s: iommu width (%d) is not "
			"sufficient for the mapped address (%llx)\n",
			__func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return 0;
}

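/*
 * A worked example of the level-trimming loop above: a domain created with
 * a 57-bit GAW uses agaw 3 (five levels). Attached to an IOMMU whose agaw
 * is 2 (48 bits, four levels), one top level is popped per iteration: the
 * PGD is replaced by the table its entry points to and the old top page is
 * freed.
 */
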
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	int ret;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
		return -EPERM;
	}

	if (is_aux_domain(dev, domain))
		return -EPERM;

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain)
			dmar_remove_one_dev_info(dev);
	}

	ret = prepare_domain_attach_device(domain, dev);
	if (ret)
		return ret;

	return domain_add_dev_info(to_dmar_domain(domain), dev);
}

static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
					 struct device *dev)
{
	int ret;

	if (!is_aux_domain(dev, domain))
		return -EPERM;

	ret = prepare_domain_attach_device(domain, dev);
	if (ret)
		return ret;

	return aux_domain_add_dev(to_dmar_domain(domain), dev);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	dmar_remove_one_dev_info(dev);
}

static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
					  struct device *dev)
{
	aux_domain_remove_dev(to_dmar_domain(domain), dev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
		return -EINVAL;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

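/*
 * Example of the rounding above: size 0x1000 at hpa 0x12345800 straddles
 * two 4KiB frames, so aligned_nrpages() returns 2 and both frames are
 * mapped even though the nominal size is one page.
 */
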
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
	if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
		return 0;

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_domain_iommu(iommu_id, dmar_domain)
		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
				      start_pfn, npages, !freelist, 0);

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

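/*
 * Example of the size fix-up above: asking to unmap 4KiB that is covered by
 * a 2MiB superpage PTE (level 2) widens size to VTD_PAGE_SIZE << 9 = 2MiB;
 * the returned value tells the caller how much was actually unmapped.
 */
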
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
		return 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static inline bool scalable_mode_support(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ret = true;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!sm_supported(iommu)) {
			ret = false;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static inline bool iommu_pasid_support(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ret = true;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!pasid_supported(iommu)) {
			ret = false;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

static int intel_iommu_add_device(struct device *dev)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(&iommu->iommu, dev);

	if (translation_pre_enabled(iommu))
		dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	domain = iommu_get_domain_for_dev(dev);
	dmar_domain = to_dmar_domain(domain);
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (device_def_domain_type(dev, 1) == IOMMU_DOMAIN_IDENTITY) {
			ret = iommu_request_dm_for_dev(dev);
			if (ret) {
				dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
				domain_add_dev_info(si_domain, dev);
				dev_info(dev,
					 "Device uses a private identity domain.\n");
			}
		}
	} else {
		if (device_def_domain_type(dev, 1) == IOMMU_DOMAIN_DMA) {
			ret = iommu_request_dma_domain_for_dev(dev);
			if (ret) {
				dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
				if (!get_private_domain_for_dev(dev)) {
					dev_warn(dev,
						 "Failed to get a private domain.\n");
					return -ENOMEM;
				}

				dev_info(dev,
					 "Device uses a private dma domain.\n");
			}
		}
	}

	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(&iommu->iommu, dev);
}

static void intel_iommu_get_resv_regions(struct device *device,
					 struct list_head *head)
{
	struct iommu_resv_region *reg;
	struct dmar_rmrr_unit *rmrr;
	struct device *i_dev;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, i_dev) {
			if (i_dev != device)
				continue;

			list_add_tail(&rmrr->resv->list, head);
		}
	}
	rcu_read_unlock();

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
	if (dev_is_pci(device)) {
		struct pci_dev *pdev = to_pci_dev(device);

		if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
			reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
						      IOMMU_RESV_DIRECT_RELAXABLE);
			if (reg)
				list_add_tail(&reg->list, head);
		}
	}
#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */

	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
				      0, IOMMU_RESV_MSI);
	if (!reg)
		return;
	list_add_tail(&reg->list, head);
}

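/*
 * Of the regions added above, the RMRR entries reuse the shared rmrr->resv
 * objects while the ISA and IOAPIC/MSI windows are allocated per call;
 * intel_iommu_put_resv_regions() below only frees entries of type
 * IOMMU_RESV_MSI.
 */
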
static void intel_iommu_put_resv_regions(struct device *dev,
					 struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list) {
		if (entry->type == IOMMU_RESV_MSI)
			kfree(entry);
	}
}

int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
{
	struct device_domain_info *info;
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;

	domain = find_domain(dev);
	if (!domain)
		return -EINVAL;

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -EINVAL;
	info = dev->archdata.iommu;
	if (!info || !info->pasid_supported)
		goto out;

	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
	if (WARN_ON(!context))
		goto out;

	ctx_lo = context[0].lo;

	if (!(ctx_lo & CONTEXT_PASIDE)) {
		ctx_lo |= CONTEXT_PASIDE;
		context[0].lo = ctx_lo;
		wmb();
		iommu->flush.flush_context(iommu,
					   domain->iommu_did[iommu->seq_id],
					   PCI_DEVID(info->bus, info->devfn),
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
	}

	/* Enable PASID support in the device, if it wasn't already */
	if (!info->pasid_enabled)
		iommu_enable_dev_iotlb(info);

	ret = 0;

 out:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

static void intel_iommu_apply_resv_region(struct device *dev,
					  struct iommu_domain *domain,
					  struct iommu_resv_region *region)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	unsigned long start, end;

	start = IOVA_PFN(region->start);
	end   = IOVA_PFN(region->start + region->length - 1);

	WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
}

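/*
 * Reserving the PFN range in the domain's iovad keeps the DMA-API allocator
 * from ever handing out IOVAs that overlap a reserved region; the WARN
 * fires if the reservation could not be inserted.
 */
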
#ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	if (iommu_dummy(dev)) {
		dev_warn(dev,
			 "No IOMMU translation for device; cannot enable SVM\n");
		return NULL;
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu) {
		dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
		return NULL;
	}

	return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */

static int intel_iommu_enable_auxd(struct device *dev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu || dmar_disabled)
		return -EINVAL;

	if (!sm_supported(iommu) || !pasid_supported(iommu))
		return -EINVAL;

	ret = intel_iommu_enable_pasid(iommu, dev);
	if (ret)
		return -ENODEV;

	spin_lock_irqsave(&device_domain_lock, flags);
	info = dev->archdata.iommu;
	info->auxd_enabled = 1;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int intel_iommu_disable_auxd(struct device *dev)
{
	struct device_domain_info *info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	info = dev->archdata.iommu;
	if (!WARN_ON(!info))
		info->auxd_enabled = 0;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

/*
 * A PCI express designated vendor specific extended capability is defined
 * in section 3.7 of the Intel Scalable I/O Virtualization technical spec
 * for system software and tools to detect endpoint devices supporting the
 * Intel scalable IO virtualization without host driver dependency.
 *
 * Returns the address of the matching extended capability structure within
 * the device's PCI configuration space or 0 if the device does not support
 * it.
 */
static int siov_find_pci_dvsec(struct pci_dev *pdev)
{
	int pos;
	u16 vendor, id;

	pos = pci_find_next_ext_capability(pdev, 0, 0x23);
	while (pos) {
		pci_read_config_word(pdev, pos + 4, &vendor);
		pci_read_config_word(pdev, pos + 8, &id);
		if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
			return pos;

		pos = pci_find_next_ext_capability(pdev, pos, 0x23);
	}

	return 0;
}

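/*
 * Layout assumed above, following the PCIe DVSEC definition: extended
 * capability ID 0x23 is "Designated Vendor-Specific"; the vendor ID sits in
 * the low 16 bits at offset +4 and the DVSEC ID at offset +8. Intel uses
 * DVSEC ID 5 to advertise Scalable IOV support.
 */
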
static bool
intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat == IOMMU_DEV_FEAT_AUX) {
		int ret;

		if (!dev_is_pci(dev) || dmar_disabled ||
		    !scalable_mode_support() || !iommu_pasid_support())
			return false;

		ret = pci_pasid_features(to_pci_dev(dev));
		if (ret < 0)
			return false;

		return !!siov_find_pci_dvsec(to_pci_dev(dev));
	}

	return false;
}

static int
intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat == IOMMU_DEV_FEAT_AUX)
		return intel_iommu_enable_auxd(dev);

	return -ENODEV;
}

static int
intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat == IOMMU_DEV_FEAT_AUX)
		return intel_iommu_disable_auxd(dev);

	return -ENODEV;
}

static bool
intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
{
	struct device_domain_info *info = dev->archdata.iommu;

	if (feat == IOMMU_DEV_FEAT_AUX)
		return scalable_mode_support() && info && info->auxd_enabled;

	return false;
}

static int
intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);

	return dmar_domain->default_pasid > 0 ?
			dmar_domain->default_pasid : -EINVAL;
}

static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
					   struct device *dev)
{
	return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
}

const struct iommu_ops intel_iommu_ops = {
	.capable		= intel_iommu_capable,
	.domain_alloc		= intel_iommu_domain_alloc,
	.domain_free		= intel_iommu_domain_free,
	.attach_dev		= intel_iommu_attach_device,
	.detach_dev		= intel_iommu_detach_device,
	.aux_attach_dev		= intel_iommu_aux_attach_device,
	.aux_detach_dev		= intel_iommu_aux_detach_device,
	.aux_get_pasid		= intel_iommu_aux_get_pasid,
	.map			= intel_iommu_map,
	.unmap			= intel_iommu_unmap,
	.iova_to_phys		= intel_iommu_iova_to_phys,
	.add_device		= intel_iommu_add_device,
	.remove_device		= intel_iommu_remove_device,
	.get_resv_regions	= intel_iommu_get_resv_regions,
	.put_resv_regions	= intel_iommu_put_resv_regions,
	.apply_resv_region	= intel_iommu_apply_resv_region,
	.device_group		= pci_device_group,
	.dev_has_feat		= intel_iommu_dev_has_feat,
	.dev_feat_enabled	= intel_iommu_dev_feat_enabled,
	.dev_enable_feat	= intel_iommu_dev_enable_feat,
	.dev_disable_feat	= intel_iommu_dev_disable_feat,
	.is_attach_deferred	= intel_iommu_is_attach_deferred,
	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
};

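/*
 * This table is the driver's entry point from the generic IOMMU layer:
 * iommu_map() lands in intel_iommu_map(), iommu_attach_device() in
 * intel_iommu_attach_device(), and so on; pgsize_bitmap advertises the
 * supported page sizes to the core.
 */
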
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pci_info(dev, "Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

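/*
 * GGC is the GMCH graphics control register in the host bridge's config
 * space (offset 0x52 on the parts quirked below); the fields above encode
 * how much stolen memory the BIOS set aside for the GTT and whether any of
 * it was reserved for VT-d (shadow GTT) use.
 */
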
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}