// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-map-ops.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/dma-iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <linux/numa.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "../irq_remapping.h"
#include "pasid.h"
#include "cap_audit.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
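/*
 * Editor's note, a worked example of the macros above: for a 48-bit
 * guest address width, __DOMAIN_MAX_PFN(48) = (1ULL << 36) - 1, i.e.
 * 2^36 4KiB pages, and DOMAIN_MAX_ADDR(48) = (2^36 - 1) << 12 =
 * 0xfffffffff000. On 64-bit kernels the min_t() clamp is a no-op; it
 * only matters where 'unsigned long' is 32 bits wide.
 */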
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
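/*
 * Editor's note, for illustration: ~0xFFFUL leaves bits 0-11 clear and
 * sets every bit from 12 upward, i.e. it advertises 4KiB (bit 12),
 * 8KiB (bit 13), ..., 2MiB (bit 21), 1GiB (bit 30) and every other
 * power-of-two multiple of 4KiB, matching the comment above.
 */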
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(u64 pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline u64 level_mask(int level)
{
	return -1ULL << level_to_offset_bits(level);
}

static inline u64 level_size(int level)
{
	return 1ULL << level_to_offset_bits(level);
}

static inline u64 align_to_level(u64 pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
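/*
 * Editor's note, a worked example of the AGAW arithmetic above: an
 * adjusted guest address width (agaw) of 2 means 30 + 2 * 9 = 48-bit
 * addresses walked through agaw_to_level(2) = 4 page-table levels;
 * agaw = 3 gives 57 bits and 5 levels. width_to_agaw() is the inverse,
 * rounding a requested width up to the next 9-bit stride boundary.
 */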
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
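/*
 * Editor's note: on x86, PAGE_SHIFT and VTD_PAGE_SHIFT are both 12, so
 * the conversions above are no-op shifts by zero; they only do real
 * work if MM pages are ever larger than the 4KiB VT-d page.
 */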
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
static int intel_iommu_tboot_noforce;
static int no_platform_optin;

#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
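/*
 * Editor's note, a summary of the legacy-mode context-entry layout as
 * encoded by the helpers above: lo bit 0 = present, bit 1 = fault
 * processing disable (cleared by context_set_fault_enable()), bits
 * 3:2 = translation type, bit 11 = PASID enable, bits 63:12 = address
 * root; hi bits 2:0 = address width, bit 3 = "copied from old kernel"
 * software flag, bits 23:8 = domain id.
 */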
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])
struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

struct dmar_satc_unit {
	struct list_head list;		/* list of SATC units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	struct intel_iommu *iommu;	/* the corresponding iommu */
	int devices_cnt;		/* target device count */
	u8 atc_required:1;		/* ATS is required */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
static LIST_HEAD(dmar_satc_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev);
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
int intel_iommu_sm = 1;
#else
int intel_iommu_sm;
#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int iommu_skip_te_disable;

#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
struct device_domain_info *get_domain_info(struct device *dev)
{
	struct device_domain_info *info;

	if (!dev)
		return NULL;

	info = dev_iommu_priv_get(dev);
	if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
		return NULL;

	return info;
}
DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

/*
 * Iterate over elements in device_domain_list and call the specified
 * callback @fn against each element.
 */
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
				     void *data), void *data)
{
	int ret = 0;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &device_domain_list, global) {
		ret = fn(info, data);
		if (ret) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			return ret;
		}
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}
const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			no_platform_optin = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
			iommu_dma_forcedac = true;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "sm_on", 5)) {
			pr_info("Intel-IOMMU: scalable mode supported\n");
			intel_iommu_sm = 1;
		} else if (!strncmp(str, "tboot_noforce", 13)) {
			pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
			intel_iommu_tboot_noforce = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static struct dmar_domain *get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;

	domains[did & 0xff] = domain;
}
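/*
 * Editor's note: domain IDs are kept in a lazily-allocated two-level
 * radix table of 256-entry pages. For example, did 0x1234 lives in
 * iommu->domains[0x12][0x34]; the top-level array is sized from
 * cap_ndoms() in iommu_init_domains() below.
 */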
void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}
static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline bool domain_use_first_level(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}
/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate the agaw for each iommu.
 * "SAGAW" may be different across iommus: use the default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default one.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
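/*
 * Editor's note: per the VT-d spec, SAGAW is a bitmap in which bit N
 * set means N+2-level paging is supported. E.g. sagaw with only bit 2
 * set supports only agaw 2 (4-level, 48-bit), so a request for the
 * 57-bit default falls back to 48 bits via the descending search in
 * __iommu_calculate_agaw() above.
 */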
/* This function only returns a single iommu in a domain */
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
		return NULL;

	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{
	return sm_supported(iommu) ?
			ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!iommu_paging_structure_coherency(g_iommus[i])) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!iommu_paging_structure_coherency(iommu)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}
static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			/*
			 * If the hardware is operating in the scalable mode,
			 * the snooping control is always supported since we
			 * always set PASID-table-entry.PGSNP bit if the domain
			 * is managed outside (UNMANAGED).
			 */
			if (!sm_supported(iommu) &&
			    !ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}
static int domain_update_iommu_superpage(struct dmar_domain *domain,
					 struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0x3;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (domain && domain_use_first_level(domain)) {
				if (!cap_fl1gp_support(iommu->cap))
					mask = 0x1;
			} else {
				mask &= cap_super_page_val(iommu->cap);
			}

			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}
static int domain_update_device_node(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	int nid = NUMA_NO_NODE;

	assert_spin_locked(&device_domain_lock);

	if (list_empty(&domain->devices))
		return NUMA_NO_NODE;

	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev)
			continue;

		/*
		 * There could possibly be multiple device numa nodes as
		 * devices within the same domain may sit behind different
		 * IOMMUs. There is no perfect answer in such a situation,
		 * so we pick the first node we find: first come, first
		 * served.
		 */
		nid = dev_to_node(info->dev);
		if (nid != NUMA_NO_NODE)
			break;
	}

	return nid;
}
static void domain_update_iotlb(struct dmar_domain *domain);

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);

	/*
	 * If RHSA is missing, we should default to the device numa domain
	 * as fall back.
	 */
	if (domain->nid == NUMA_NO_NODE)
		domain->nid = domain_update_device_node(domain);

	/*
	 * First-level translation restricts the input-address to a
	 * canonical address (i.e., address bits 63:N have the same
	 * value as address bit [N-1], where N is 48-bits with 4-level
	 * paging and 57-bits with 5-level paging). Hence, skip bit
	 * [N-1].
	 */
	if (domain_use_first_level(domain))
		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
	else
		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);

	domain_update_iotlb(domain);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (sm_supported(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}
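/*
 * Editor's note: in scalable mode each root entry covers a bus in two
 * halves, root->lo for devfn 0x00-0x7f and root->hi for 0x80-0xff, and
 * each device occupies a *pair* of 128-bit context slots (hence the
 * devfn *= 2); in legacy mode only root->lo is used and devfn indexes
 * the context table directly.
 */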
static bool attach_deferred(struct device *dev)
{
	return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
}

/**
 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
 *				 sub-hierarchy of a candidate PCI-PCI bridge
 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
 * @bridge: the candidate PCI-PCI bridge
 *
 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
 */
static bool
is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
{
	struct pci_dev *pdev, *pbridge;

	if (!dev_is_pci(dev) || !dev_is_pci(bridge))
		return false;

	pdev = to_pci_dev(dev);
	pbridge = to_pci_dev(bridge);

	if (pbridge->subordinate &&
	    pbridge->subordinate->number <= pdev->bus->number &&
	    pbridge->subordinate->busn_res.end >= pdev->bus->number)
		return true;

	return false;
}
static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return false;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
		pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return true;
	}

	return false;
}
static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
{
	if (!iommu || iommu->drhd->ignored)
		return true;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
		    pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
		    quirk_ioat_snb_local_iommu(pdev))
			return true;
	}

	return false;
}
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct pci_dev *pdev = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	u16 segment = 0;
	int i;

	if (!dev)
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pf_pdev;

		pdev = pci_real_dma_dev(to_pci_dev(dev));

		/* VFs aren't listed in scope tables; we need to look up
		 * the PF instead to find the IOMMU. */
		pf_pdev = pci_physfn(pdev);
		dev = &pf_pdev->dev;
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				/* For a VF use its original BDF# not that of the PF
				 * which we used for the IOMMU lookup. Strictly speaking
				 * we could do this for all PCI devices; we only need to
				 * get the BDF# from the scope table for ACPI matches. */
				if (pdev && pdev->is_virtfn)
					goto got_pdev;

				if (bus && devfn) {
					*bus = drhd->devices[i].bus;
					*devfn = drhd->devices[i].devfn;
				}
				goto out;
			}

			if (is_downstream_to_pci_bridge(dev, tmp))
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
got_pdev:
			if (bus && devfn) {
				*bus = pdev->bus->number;
				*devfn = pdev->devfn;
			}
			goto out;
		}
	}
	iommu = NULL;
out:
	if (iommu_is_dummy(iommu, dev))
		iommu = NULL;

	rcu_read_unlock();

	return iommu;
}
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}
static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!sm_supported(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (domain_use_first_level(domain)) {
				pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
				if (domain->domain.type == IOMMU_DOMAIN_DMA)
					pteval |= DMA_FL_PTE_ACCESS;
			}
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
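/*
 * Editor's note: with agaw = 2 (4-level), looking up pfn 0x12345 walks
 * levels 4..1, consuming 9 bits of the pfn per level via
 * pfn_level_offset(). Passing *target_level = 0 means "walk to the
 * leaf (or first superpage/non-present entry)", while a caller that
 * wants a 2MiB superpage slot passes *target_level = 2.
 */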
/* return the address's pte at a specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear last level pte, a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       int retain_level, struct dma_pte *pte,
			       unsigned long pfn, unsigned long start_pfn,
			       unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2) {
			dma_pte_free_level(domain, level - 1, retain_level,
					   level_pte, level_pfn, start_pfn,
					   last_pfn);
		}

		/*
		 * Free the page table if we're below the level we want to
		 * retain and the range covers the entire table.
		 */
		if (level < retain_level && !(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
/*
 * clear last level (leaf) ptes and free page table pages below the
 * level we wish to keep intact.
 */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn,
				   int retain_level)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}
static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}
/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
static struct page *domain_unmap(struct dmar_domain *domain,
				 unsigned long start_pfn,
				 unsigned long last_pfn,
				 struct page *freelist)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn,
				       freelist);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}
static void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("Allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (sm_supported(iommu))
		addr |= DMA_RTADDR_SMT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	if (sm_supported(iommu))
		qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/*
	 * Note: set drain read/write. This is probably only needed to be
	 * super safe; it looks like we could ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;

	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		pr_err("Flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("TLB flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
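/*
 * Editor's note on the PSI encoding above: for a page-selective
 * invalidation the address must be aligned to 2^size_order pages, so
 * size_order can simply be ORed into the low bits of the IVA value;
 * e.g. flushing 16 pages at IOVA 0x10000 uses val_iva = 4 | 0x10000.
 */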
static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
			u8 bus, u8 devfn)
{
	struct device_domain_info *info;

	assert_spin_locked(&device_domain_lock);

	if (!iommu->qi)
		return NULL;

	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			if (info->ats_supported && info->dev)
				return info;
			break;
		}

	return NULL;
}
static void domain_update_iotlb(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	bool has_iotlb_device = false;

	assert_spin_locked(&device_domain_lock);

	list_for_each_entry(info, &domain->devices, link)
		if (info->ats_enabled) {
			has_iotlb_device = true;
			break;
		}

	if (!has_iotlb_device) {
		struct subdev_domain_info *sinfo;

		list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
			info = get_domain_info(sinfo->pdev);
			if (info && info->ats_enabled) {
				has_iotlb_device = true;
				break;
			}
		}
	}

	domain->has_iotlb_device = has_iotlb_device;
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	struct pci_dev *pdev;

	assert_spin_locked(&device_domain_lock);

	if (!info || !dev_is_pci(info->dev))
		return;

	pdev = to_pci_dev(info->dev);
	/* For IOMMU that supports device IOTLB throttling (DIT), we assign
	 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
	 * queue depth at PF level. If DIT is not set, PFSID will be treated as
	 * reserved, which should be set to 0.
	 */
	if (!ecap_dit(info->iommu->ecap))
		info->pfsid = 0;
	else {
		struct pci_dev *pf_pdev;

		/* pdev will be returned if device is not a vf */
		pf_pdev = pci_physfn(pdev);
		info->pfsid = pci_dev_id(pf_pdev);
	}

#ifdef CONFIG_INTEL_IOMMU_SVM
	/* The PCIe spec, in its wisdom, declares that the behaviour of
	   the device if you enable PASID support after ATS support is
	   undefined. So always enable PASID support on devices which
	   have it, even if we can't yet know if we're ever going to
	   use it. */
	if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
		info->pasid_enabled = 1;

	if (info->pri_supported &&
	    (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1)  &&
	    !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
		info->pri_enabled = 1;
#endif
	if (info->ats_supported && pci_ats_page_aligned(pdev) &&
	    !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
		info->ats_enabled = 1;
		domain_update_iotlb(info->domain);
		info->ats_qdep = pci_ats_queue_depth(pdev);
	}
}
static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	struct pci_dev *pdev;

	assert_spin_locked(&device_domain_lock);

	if (!dev_is_pci(info->dev))
		return;

	pdev = to_pci_dev(info->dev);

	if (info->ats_enabled) {
		pci_disable_ats(pdev);
		info->ats_enabled = 0;
		domain_update_iotlb(info->domain);
	}
#ifdef CONFIG_INTEL_IOMMU_SVM
	if (info->pri_enabled) {
		pci_disable_pri(pdev);
		info->pri_enabled = 0;
	}
	if (info->pasid_enabled) {
		pci_disable_pasid(pdev);
		info->pasid_enabled = 0;
	}
#endif
}
static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
				    u64 addr, unsigned int mask)
{
	u16 sid, qdep;

	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
			   qdep, addr, mask);
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	unsigned long flags;
	struct device_domain_info *info;
	struct subdev_domain_info *sinfo;

	if (!domain->has_iotlb_device)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		__iommu_flush_dev_iotlb(info, addr, mask);

	list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
		info = get_domain_info(sinfo->pdev);
		__iommu_flush_dev_iotlb(info, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void domain_flush_piotlb(struct intel_iommu *iommu,
				struct dmar_domain *domain,
				u64 addr, unsigned long npages, bool ih)
{
	u16 did = domain->iommu_did[iommu->seq_id];

	if (domain->default_pasid)
		qi_flush_piotlb(iommu, did, domain->default_pasid,
				addr, npages, ih);

	if (!list_empty(&domain->devices))
		qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
				  struct dmar_domain *domain,
				  unsigned long pfn, unsigned int pages,
				  int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
	u16 did = domain->iommu_did[iommu->seq_id];

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;

	if (domain_use_first_level(domain)) {
		domain_flush_piotlb(iommu, domain, addr, pages, ih);
	} else {
		/*
		 * Fallback to domain selective flush if no PSI support or
		 * the size is too big. PSI requires page size to be 2 ^ x,
		 * and the base address is naturally aligned to the size.
		 */
		if (!cap_pgsel_inv(iommu->cap) ||
		    mask > cap_max_amask_val(iommu->cap))
			iommu->flush.flush_iotlb(iommu, did, 0, 0,
							DMA_TLB_DSI_FLUSH);
		else
			iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
							DMA_TLB_PSI_FLUSH);
	}

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(domain, addr, mask);
}
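/*
 * Editor's note: because mask rounds the page count up to a power of
 * two, a request for, say, 9 pages is flushed as a 16-page PSI
 * (mask = ilog2(16) = 4), trading a little over-invalidation for a
 * single invalidation operation.
 */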
/* Notification for newly created mappings */
static inline void __mapping_notify_one(struct intel_iommu *iommu,
					struct dmar_domain *domain,
					unsigned long pfn, unsigned int pages)
{
	/*
	 * It's a non-present to present mapping. Only flush if caching mode
	 * and second level.
	 */
	if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
	else
		iommu_flush_write_buffer(iommu);
}
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	int idx;

	for_each_domain_iommu(idx, dmar_domain) {
		struct intel_iommu *iommu = g_iommus[idx];
		u16 did = dmar_domain->iommu_did[iommu->seq_id];

		if (domain_use_first_level(dmar_domain))
			domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0);
		else
			iommu->flush.flush_iotlb(iommu, did, 0, 0,
						 DMA_TLB_DSI_FLUSH);

		if (!cap_caching_mode(iommu->cap))
			iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
					      0, MAX_AGAW_PFN_WIDTH);
	}
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
	    (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	u32 ndomains, nlongs;
	size_t size;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("%s: Number of Domains supported <%d>\n",
		 iommu->name, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("%s: Allocating domain id array failed\n",
		       iommu->name);
		return -ENOMEM;
	}

	size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
	iommu->domains = kzalloc(size, GFP_KERNEL);

	if (iommu->domains) {
		size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[0] = kzalloc(size, GFP_KERNEL);
	}

	if (!iommu->domains || !iommu->domains[0]) {
		pr_err("%s: Allocating domain array failed\n",
		       iommu->name);
		kfree(iommu->domain_ids);
		kfree(iommu->domains);
		iommu->domain_ids = NULL;
		iommu->domains    = NULL;
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain-id 0, hence we need to pre-allocate it. We also
	 * use domain-id 0 as a marker for non-allocated domain-id, so
	 * make sure it is not used for a real domain.
	 */
	set_bit(0, iommu->domain_ids);

	/*
	 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
	 * entry for first-level or pass-through translation modes should
	 * be programmed with a domain id different from those used for
	 * second-level or nested translation. We reserve a domain id for
	 * this purpose.
	 */
	if (sm_supported(iommu))
		set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);

	return 0;
}
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	if (!iommu->domains || !iommu->domain_ids)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
		if (info->iommu != iommu)
			continue;

		if (!info->dev || !info->domain)
			continue;

		__dmar_remove_one_dev_info(info);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
}
static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
		int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
		int i;

		for (i = 0; i < elems; i++)
			kfree(iommu->domains[i]);
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);

#ifdef CONFIG_INTEL_IOMMU_SVM
	if (pasid_supported(iommu)) {
		if (ecap_prs(iommu->ecap))
			intel_svm_finish_prq(iommu);
	}
	if (vccap_pasid(iommu->vccap))
		ioasid_unregister_allocator(&iommu->pasid_allocator);

#endif
}
/*
 * Check and return whether first level is used by default for
 * DMA translation.
 */
static bool first_level_by_default(void)
{
	return scalable_mode_support() && intel_cap_flts_sanity();
}

static struct dmar_domain *alloc_domain(int flags)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = NUMA_NO_NODE;
	domain->flags = flags;
	if (first_level_by_default())
		domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
	domain->has_iotlb_device = false;
	INIT_LIST_HEAD(&domain->devices);
	INIT_LIST_HEAD(&domain->subdevices);

	return domain;
}
/* Must be called with iommu->lock */
static int domain_attach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long ndomains;
	int num;

	assert_spin_locked(&device_domain_lock);
	assert_spin_locked(&iommu->lock);

	domain->iommu_refcnt[iommu->seq_id] += 1;
	domain->iommu_count += 1;
	if (domain->iommu_refcnt[iommu->seq_id] == 1) {
		ndomains = cap_ndoms(iommu->cap);
		num      = find_first_zero_bit(iommu->domain_ids, ndomains);

		if (num >= ndomains) {
			pr_err("%s: No free domain ids\n", iommu->name);
			domain->iommu_refcnt[iommu->seq_id] -= 1;
			domain->iommu_count -= 1;
			return -ENOSPC;
		}

		set_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, domain);

		domain->iommu_did[iommu->seq_id] = num;
		domain->nid		 = iommu->node;

		domain_update_iommu_cap(domain);
	}

	return 0;
}
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num, count;

	assert_spin_locked(&device_domain_lock);
	assert_spin_locked(&iommu->lock);

	domain->iommu_refcnt[iommu->seq_id] -= 1;
	count = --domain->iommu_count;
	if (domain->iommu_refcnt[iommu->seq_id] == 0) {
		num = domain->iommu_did[iommu->seq_id];
		clear_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, NULL);

		domain_update_iommu_cap(domain);
		domain->iommu_did[iommu->seq_id] = 0;
	}

	return count;
}
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
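/*
 * Editor's note, for illustration: guest widths already on a 9-bit
 * stride boundary above 12 pass through unchanged (48 -> 48), while
 * others round up to the next boundary (50 -> 57), capped at 64.
 */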
static void domain_exit(struct dmar_domain *domain)
{
	/* Remove associated devices and clear attached or cached domains */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	if (domain->domain.type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);

	if (domain->pgd) {
		struct page *freelist;

		freelist = domain_unmap(domain, 0,
					DOMAIN_MAX_PFN(domain->gaw), NULL);
		dma_free_pagelist(freelist);
	}

	free_domain_mem(domain);
}
/*
 * Get the PASID directory size for scalable mode context entry.
 * Value of X in the PDTS field of a scalable mode context entry
 * indicates PASID directory with 2^(X + 7) entries.
 */
static inline unsigned long context_get_sm_pds(struct pasid_table *table)
{
	unsigned long pds, max_pde;

	max_pde = table->max_pasid >> PASID_PDE_SHIFT;
	pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
	if (pds < 7)
		return 0;

	return pds - 7;
}
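/*
 * Editor's note, a worked example (assuming PASID_PDE_SHIFT is 6, i.e.
 * 64 PASIDs per directory entry): with table->max_pasid = 2^20,
 * max_pde = 2^14, find_first_bit() returns 14, so PDTS = 14 - 7 = 7,
 * i.e. a directory of 2^(7 + 7) = 16384 entries.
 */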
/*
 * Set the RID_PASID field of a scalable mode context entry. The
 * IOMMU hardware will use the PASID value set in this field for
 * DMA translations of DMA requests without PASID.
 */
static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
	context->hi |= pasid & ((1 << 20) - 1);
}

/*
 * Set the DTE(Device-TLB Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_dte(struct context_entry *context)
{
	context->lo |= (1 << 2);
}

/*
 * Set the PRE(Page Request Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_pre(struct context_entry *context)
{
	context->lo |= (1 << 4);
}

/* Convert value to context PASID directory size field coding. */
#define context_pdts(pds)	(((pds) & 0x7) << 9)
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      struct pasid_table *table,
				      u8 bus, u8 devfn)
{
	u16 did = domain->iommu_did[iommu->seq_id];
	int translation = CONTEXT_TT_MULTI_LEVEL;
	struct device_domain_info *info = NULL;
	struct context_entry *context;
	unsigned long flags;
	int ret;

	WARN_ON(did == 0);

	if (hw_pass_through && domain_type_is_si(domain))
		translation = CONTEXT_TT_PASS_THROUGH;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -ENOMEM;
	context = iommu_context_addr(iommu, bus, devfn, 1);
	if (!context)
		goto out_unlock;

	ret = 0;
	if (context_present(context))
		goto out_unlock;

	/*
	 * For kdump cases, old valid entries may be cached due to the
	 * in-flight DMA and copied pgtable, but there is no unmapping
	 * behaviour for them, thus we need an explicit cache flush for
	 * the newly-mapped device. For kdump, at this point, the device
	 * is supposed to finish reset at its driver probe stage, so no
	 * in-flight DMA will exist, and we don't need to worry anymore
	 * hereafter.
	 */
	if (context_copied(context)) {
		u16 did_old = context_domain_id(context);

		if (did_old < cap_ndoms(iommu->cap)) {
			iommu->flush.flush_context(iommu, did_old,
						   (((u16)bus) << 8) | devfn,
						   DMA_CCMD_MASK_NOBIT,
						   DMA_CCMD_DEVICE_INVL);
			iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
						 DMA_TLB_DSI_FLUSH);
		}
	}

	context_clear_entry(context);

	if (sm_supported(iommu)) {
		unsigned long pds;

		WARN_ON(!table);

		/* Setup the PASID DIR pointer: */
		pds = context_get_sm_pds(table);
		context->lo = (u64)virt_to_phys(table->table) |
				context_pdts(pds);

		/* Setup the RID_PASID field: */
		context_set_sm_rid2pasid(context, PASID_RID2PASID);

		/*
		 * Setup the Device-TLB enable bit and Page request
		 * Enable bit:
		 */
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		if (info && info->ats_supported)
			context_set_sm_dte(context);
		if (info && info->pri_supported)
			context_set_sm_pre(context);
	} else {
		struct dma_pte *pgd = domain->pgd;
		int agaw;

		context_set_domain_id(context, did);

		if (translation != CONTEXT_TT_PASS_THROUGH) {
			/*
			 * Skip top levels of page tables for iommu which has
			 * less agaw than default. Unnecessary for PT mode.
			 */
			for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
				ret = -ENOMEM;
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd))
					goto out_unlock;
			}

			info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
			if (info && info->ats_supported)
				translation = CONTEXT_TT_DEV_IOTLB;
			else
				translation = CONTEXT_TT_MULTI_LEVEL;

			context_set_address_root(context, virt_to_phys(pgd));
			context_set_address_width(context, agaw);
		} else {
			/*
			 * In pass through mode, AW must be programmed to
			 * indicate the largest AGAW value supported by
			 * hardware. And ASR is ignored by hardware.
			 */
			context_set_address_width(context, iommu->msagaw);
		}

		context_set_translation_type(context, translation);
	}

	context_set_fault_enable(context);
	context_set_present(context);
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);

	ret = 0;

out_unlock:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}
struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct pasid_table *table;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  data->table, PCI_BUS_NUM(alias),
					  alias & 0xff);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
	struct domain_context_mapping_data data;
	struct pasid_table *table;
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	table = intel_pasid_get_table(dev);

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, table,
						  bus, devfn);

	data.domain = domain;
	data.iommu = iommu;
	data.table = table;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}
static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);

	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
}
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
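/*
 * Editor's note, for illustration: host_addr = 0x1234 (offset 0x234
 * into its page) with size = 0x2000 gives PAGE_ALIGN(0x2234) = 0x3000,
 * i.e. three 4KiB VT-d pages; the result always covers whole MM pages.
 */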
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
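/*
 * Editor's note: ORing the two pfns means one alignment test covers
 * both. E.g. iov_pfn = 0x200 and phy_pfn = 0x400 merge to 0x600, whose
 * low 9 bits are zero, so (given enough pages and hardware support)
 * the mapping can step up to level 2, i.e. 2MiB superpages.
 */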
/*
 * Ensure that old small page tables are removed to make room for superpage(s).
 * We're going to add new large pages, so make sure we don't remove their parent
 * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
 */
static void switch_to_super_page(struct dmar_domain *domain,
				 unsigned long start_pfn,
				 unsigned long end_pfn, int level)
{
	unsigned long lvl_pages = lvl_to_nr_pages(level);
	struct dma_pte *pte = NULL;
	int i;

	while (start_pfn <= end_pfn) {
		if (!pte)
			pte = pfn_to_dma_pte(domain, start_pfn, &level);

		if (dma_pte_present(pte)) {
			dma_pte_free_pagetable(domain, start_pfn,
					       start_pfn + lvl_pages - 1,
					       level + 1);

			for_each_domain_iommu(i, domain)
				iommu_flush_iotlb_psi(g_iommus[i], domain,
						      start_pfn, lvl_pages,
						      0, 0);
		}

		pte++;
		start_pfn += lvl_pages;
		if (first_pte_in_page(pte))
			pte = NULL;
	}
}
2339 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2340 unsigned long phys_pfn, unsigned long nr_pages, int prot)
2342 unsigned int largepage_lvl = 0;
2343 unsigned long lvl_pages = 0;
2344 struct dma_pte *pte = NULL;
2348 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2350 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2353 attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
2354 attr |= DMA_FL_PTE_PRESENT;
2355 if (domain_use_first_level(domain)) {
2356 attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
2358 if (domain->domain.type == IOMMU_DOMAIN_DMA) {
2359 attr |= DMA_FL_PTE_ACCESS;
2360 if (prot & DMA_PTE_WRITE)
2361 attr |= DMA_FL_PTE_DIRTY;
2365 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
2367 while (nr_pages > 0) {
2371 largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
2372 phys_pfn, nr_pages);
2374 pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2377 /* It is a large page */
2378 if (largepage_lvl > 1) {
2379 unsigned long end_pfn;
2381 pteval |= DMA_PTE_LARGE_PAGE;
2382 end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
2383 switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
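/*
 * Example of the end_pfn arithmetic above (illustrative values): with
 * largepage_lvl == 2 (512 4KiB pages per superpage), iov_pfn == 0x200
 * and nr_pages == 0x400, level_mask(2) clears the low nine pfn bits, so
 * end_pfn = (0x600 & ~0x1ff) - 1 = 0x5ff, the last pfn covered by the
 * superpage-aligned portion of this mapping.
 */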
2385 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2389 /* We don't need a lock here; nobody else
2390 * touches this IOVA range. */
2392 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2394 static int dumps = 5;
2395 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2396 iov_pfn, tmp, (unsigned long long)pteval);
2399 debug_dma_dump_mappings(NULL);
2404 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2406 BUG_ON(nr_pages < lvl_pages);
2408 nr_pages -= lvl_pages;
2409 iov_pfn += lvl_pages;
2410 phys_pfn += lvl_pages;
2411 pteval += lvl_pages * VTD_PAGE_SIZE;
2413 /* If the next PTE would be the first in a new page, then we
2414 * need to flush the cache on the entries we've just written.
2415 * And then we'll need to recalculate 'pte', so clear it and
2416 * let it get set again in the if (!pte) block above.
2418 * If we're done (!nr_pages) we need to flush the cache too.
2420 * Also if we've been setting superpages, we may need to
2421 * recalculate 'pte' and switch back to smaller pages for the
2422 * end of the mapping, if the trailing size is not enough to
2423 * use another superpage (i.e. nr_pages < lvl_pages).
2425 * We leave clflush for the leaf pte changes to the iotlb_sync_map() callback. */
2429 if (!nr_pages || first_pte_in_page(pte) ||
2430 (largepage_lvl > 1 && nr_pages < lvl_pages))
2437 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2439 unsigned long flags;
2440 struct context_entry *context;
2446 spin_lock_irqsave(&iommu->lock, flags);
2447 context = iommu_context_addr(iommu, bus, devfn, 0);
2449 spin_unlock_irqrestore(&iommu->lock, flags);
2452 did_old = context_domain_id(context);
2453 context_clear_entry(context);
2454 __iommu_flush_cache(iommu, context, sizeof(*context));
2455 spin_unlock_irqrestore(&iommu->lock, flags);
2456 iommu->flush.flush_context(iommu,
2458 (((u16)bus) << 8) | devfn,
2459 DMA_CCMD_MASK_NOBIT,
2460 DMA_CCMD_DEVICE_INVL);
2462 if (sm_supported(iommu))
2463 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
2465 iommu->flush.flush_iotlb(iommu,
2472 static inline void unlink_domain_info(struct device_domain_info *info)
2474 assert_spin_locked(&device_domain_lock);
2475 list_del(&info->link);
2476 list_del(&info->global);
2478 dev_iommu_priv_set(info->dev, NULL);
2481 static void domain_remove_dev_info(struct dmar_domain *domain)
2483 struct device_domain_info *info, *tmp;
2484 unsigned long flags;
2486 spin_lock_irqsave(&device_domain_lock, flags);
2487 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2488 __dmar_remove_one_dev_info(info);
2489 spin_unlock_irqrestore(&device_domain_lock, flags);
2492 struct dmar_domain *find_domain(struct device *dev)
2494 struct device_domain_info *info;
2496 if (unlikely(!dev || !dev->iommu))
2499 if (unlikely(attach_deferred(dev)))
2502 /* No lock here, assumes no domain exit in normal case */
2503 info = get_domain_info(dev);
2505 return info->domain;
2510 static inline struct device_domain_info *
2511 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2513 struct device_domain_info *info;
2515 list_for_each_entry(info, &device_domain_list, global)
2516 if (info->segment == segment && info->bus == bus &&
2517 info->devfn == devfn)
2523 static int domain_setup_first_level(struct intel_iommu *iommu,
2524 struct dmar_domain *domain,
2528 struct dma_pte *pgd = domain->pgd;
2533 * Skip top levels of page tables for IOMMUs whose agaw
2534 * is smaller than the default. Unnecessary for PT mode.
2536 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2537 pgd = phys_to_virt(dma_pte_addr(pgd));
2538 if (!dma_pte_present(pgd))
2542 level = agaw_to_level(agaw);
2543 if (level != 4 && level != 5)
2546 if (pasid != PASID_RID2PASID)
2547 flags |= PASID_FLAG_SUPERVISOR_MODE;
2549 flags |= PASID_FLAG_FL5LP;
2551 if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
2552 flags |= PASID_FLAG_PAGE_SNOOP;
2554 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
2555 domain->iommu_did[iommu->seq_id],
2559 static bool dev_is_real_dma_subdevice(struct device *dev)
2561 return dev && dev_is_pci(dev) &&
2562 pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
2565 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2568 struct dmar_domain *domain)
2570 struct dmar_domain *found = NULL;
2571 struct device_domain_info *info;
2572 unsigned long flags;
2575 info = alloc_devinfo_mem();
2579 if (!dev_is_real_dma_subdevice(dev)) {
2581 info->devfn = devfn;
2582 info->segment = iommu->segment;
2584 struct pci_dev *pdev = to_pci_dev(dev);
2586 info->bus = pdev->bus->number;
2587 info->devfn = pdev->devfn;
2588 info->segment = pci_domain_nr(pdev->bus);
2591 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2592 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2595 info->domain = domain;
2596 info->iommu = iommu;
2597 info->pasid_table = NULL;
2598 info->auxd_enabled = 0;
2599 INIT_LIST_HEAD(&info->subdevices);
2601 if (dev && dev_is_pci(dev)) {
2602 struct pci_dev *pdev = to_pci_dev(info->dev);
2604 if (ecap_dev_iotlb_support(iommu->ecap) &&
2605 pci_ats_supported(pdev) &&
2606 dmar_find_matched_atsr_unit(pdev))
2607 info->ats_supported = 1;
2609 if (sm_supported(iommu)) {
2610 if (pasid_supported(iommu)) {
2611 int features = pci_pasid_features(pdev);
2613 info->pasid_supported = features | 1;
2616 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2617 pci_pri_supported(pdev))
2618 info->pri_supported = 1;
2622 spin_lock_irqsave(&device_domain_lock, flags);
2624 found = find_domain(dev);
2627 struct device_domain_info *info2;
2628 info2 = dmar_search_domain_by_dev_info(info->segment, info->bus,
2631 found = info2->domain;
2637 spin_unlock_irqrestore(&device_domain_lock, flags);
2638 free_devinfo_mem(info);
2639 /* Caller must free the original domain */
2643 spin_lock(&iommu->lock);
2644 ret = domain_attach_iommu(domain, iommu);
2645 spin_unlock(&iommu->lock);
2648 spin_unlock_irqrestore(&device_domain_lock, flags);
2649 free_devinfo_mem(info);
2653 list_add(&info->link, &domain->devices);
2654 list_add(&info->global, &device_domain_list);
2656 dev_iommu_priv_set(dev, info);
2657 spin_unlock_irqrestore(&device_domain_lock, flags);
2659 /* PASID table is mandatory for a PCI device in scalable mode. */
2660 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2661 ret = intel_pasid_alloc_table(dev);
2663 dev_err(dev, "PASID table allocation failed\n");
2664 dmar_remove_one_dev_info(dev);
2668 /* Setup the PASID entry for requests without PASID: */
2669 spin_lock_irqsave(&iommu->lock, flags);
2670 if (hw_pass_through && domain_type_is_si(domain))
2671 ret = intel_pasid_setup_pass_through(iommu, domain,
2672 dev, PASID_RID2PASID);
2673 else if (domain_use_first_level(domain))
2674 ret = domain_setup_first_level(iommu, domain, dev,
2677 ret = intel_pasid_setup_second_level(iommu, domain,
2678 dev, PASID_RID2PASID);
2679 spin_unlock_irqrestore(&iommu->lock, flags);
2681 dev_err(dev, "Setup RID2PASID failed\n");
2682 dmar_remove_one_dev_info(dev);
2687 if (dev && domain_context_mapping(domain, dev)) {
2688 dev_err(dev, "Domain context map failed\n");
2689 dmar_remove_one_dev_info(dev);
2696 static int iommu_domain_identity_map(struct dmar_domain *domain,
2697 unsigned long first_vpfn,
2698 unsigned long last_vpfn)
2701 * RMRR range might overlap with the physical memory range; clear it first.
2704 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2706 return __domain_mapping(domain, first_vpfn,
2707 first_vpfn, last_vpfn - first_vpfn + 1,
2708 DMA_PTE_READ|DMA_PTE_WRITE);
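/*
 * Illustrative sketch (hypothetical values): identity-mapping pfns
 * 0x100..0x1ff installs IOVA 0x100000-0x1fffff -> PA 0x100000-0x1fffff
 * with read/write permission, since the same pfn is passed as both the
 * IOVA start and the physical start:
 *
 *   __domain_mapping(domain, 0x100, 0x100, 0x100,
 *                    DMA_PTE_READ | DMA_PTE_WRITE);
 */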
2711 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2713 static int __init si_domain_init(int hw)
2715 struct dmar_rmrr_unit *rmrr;
2719 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2723 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2724 domain_exit(si_domain);
2731 for_each_online_node(nid) {
2732 unsigned long start_pfn, end_pfn;
2735 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2736 ret = iommu_domain_identity_map(si_domain,
2737 mm_to_dma_pfn(start_pfn),
2738 mm_to_dma_pfn(end_pfn));
2745 * Identity map the RMRRs so that devices with RMRRs can also use the si_domain.
2748 for_each_rmrr_units(rmrr) {
2749 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2751 unsigned long long start = rmrr->base_address;
2752 unsigned long long end = rmrr->end_address;
2754 if (WARN_ON(end < start ||
2755 end >> agaw_to_width(si_domain->agaw)))
2758 ret = iommu_domain_identity_map(si_domain,
2759 mm_to_dma_pfn(start >> PAGE_SHIFT),
2760 mm_to_dma_pfn(end >> PAGE_SHIFT));
2769 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2771 struct dmar_domain *ndomain;
2772 struct intel_iommu *iommu;
2775 iommu = device_to_iommu(dev, &bus, &devfn);
2779 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2780 if (ndomain != domain)
2786 static bool device_has_rmrr(struct device *dev)
2788 struct dmar_rmrr_unit *rmrr;
2793 for_each_rmrr_units(rmrr) {
2795 * Return TRUE if this RMRR contains the device we are looking for.
2798 for_each_active_dev_scope(rmrr->devices,
2799 rmrr->devices_cnt, i, tmp)
2801 is_downstream_to_pci_bridge(dev, tmp)) {
2811 * device_rmrr_is_relaxable - Test whether the RMRR of this device
2812 * is relaxable (ie. is allowed to be not enforced under some conditions)
2813 * @dev: device handle
2815 * We assume that PCI USB devices with RMRRs have them largely
2816 * for historical reasons and that the RMRR space is not actively used post
2817 * boot. This exclusion may change if vendors begin to abuse it.
2819 * The same exception is made for graphics devices, with the requirement that
2820 * any use of the RMRR regions will be torn down before assigning the device
2823 * Return: true if the RMRR is relaxable, false otherwise
2825 static bool device_rmrr_is_relaxable(struct device *dev)
2827 struct pci_dev *pdev;
2829 if (!dev_is_pci(dev))
2832 pdev = to_pci_dev(dev);
2833 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2840 * There are a couple cases where we need to restrict the functionality of
2841 * devices associated with RMRRs. The first is when evaluating a device for
2842 * identity mapping because problems exist when devices are moved in and out
2843 * of domains and their respective RMRR information is lost. This means that
2844 * a device with associated RMRRs will never be in a "passthrough" domain.
2845 * The second is use of the device through the IOMMU API. This interface
2846 * expects to have full control of the IOVA space for the device. We cannot
2847 * satisfy both the requirement that RMRR access is maintained and have an
2848 * unencumbered IOVA space. We also have no ability to quiesce the device's
2849 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2850 * We therefore prevent devices associated with an RMRR from participating in
2851 * the IOMMU API, which eliminates them from device assignment.
2853 * In both cases, devices which have relaxable RMRRs are not concerned by this
2854 * restriction. See device_rmrr_is_relaxable comment.
2856 static bool device_is_rmrr_locked(struct device *dev)
2858 if (!device_has_rmrr(dev))
2861 if (device_rmrr_is_relaxable(dev))
2868 * Return the required default domain type for a specific device.
2870 * @dev: the device in question
2874 * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
2875 * - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
2876 * - 0: both identity and dynamic domains work for this device
2878 static int device_def_domain_type(struct device *dev)
2880 if (dev_is_pci(dev)) {
2881 struct pci_dev *pdev = to_pci_dev(dev);
2883 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2884 return IOMMU_DOMAIN_IDENTITY;
2886 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2887 return IOMMU_DOMAIN_IDENTITY;
2893 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2896 * Start from a sane IOMMU hardware state.
2897 * If queued invalidation was already initialized by us
2898 * (for example, while enabling interrupt remapping), then
2899 * things are already rolling from a sane state.
2903 * Clear any previous faults.
2905 dmar_fault(-1, iommu);
2907 * Disable queued invalidation if supported and already enabled
2908 * before OS handover.
2910 dmar_disable_qi(iommu);
2913 if (dmar_enable_qi(iommu)) {
2915 * Queued invalidation is not enabled; fall back to register-based invalidation
2917 iommu->flush.flush_context = __iommu_flush_context;
2918 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2919 pr_info("%s: Using Register based invalidation\n",
2922 iommu->flush.flush_context = qi_flush_context;
2923 iommu->flush.flush_iotlb = qi_flush_iotlb;
2924 pr_info("%s: Using Queued invalidation\n", iommu->name);
2928 static int copy_context_table(struct intel_iommu *iommu,
2929 struct root_entry *old_re,
2930 struct context_entry **tbl,
2933 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2934 struct context_entry *new_ce = NULL, ce;
2935 struct context_entry *old_ce = NULL;
2936 struct root_entry re;
2937 phys_addr_t old_ce_phys;
2939 tbl_idx = ext ? bus * 2 : bus;
2940 memcpy(&re, old_re, sizeof(re));
2942 for (devfn = 0; devfn < 256; devfn++) {
2943 /* First calculate the correct index */
2944 idx = (ext ? devfn * 2 : devfn) % 256;
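/*
 * Example (illustrative): in extended mode each devfn consumes two
 * context entries, so a 4KiB table holds only 128 devfns. devfn 0x7f
 * maps to idx = (0x7f * 2) % 256 = 0xfe, while devfn 0x80 wraps to
 * idx = 0x100 % 256 = 0, starting the second table for this bus.
 */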
2947 /* First save what we may have and clean up */
2949 tbl[tbl_idx] = new_ce;
2950 __iommu_flush_cache(iommu, new_ce,
2960 old_ce_phys = root_entry_lctp(&re);
2962 old_ce_phys = root_entry_uctp(&re);
2965 if (ext && devfn == 0) {
2966 /* No LCTP, try UCTP */
2975 old_ce = memremap(old_ce_phys, PAGE_SIZE,
2980 new_ce = alloc_pgtable_page(iommu->node);
2987 /* Now copy the context entry */
2988 memcpy(&ce, old_ce + idx, sizeof(ce));
2990 if (!__context_present(&ce))
2993 did = context_domain_id(&ce);
2994 if (did >= 0 && did < cap_ndoms(iommu->cap))
2995 set_bit(did, iommu->domain_ids);
2998 * We need a marker for copied context entries. This
2999 * marker needs to work for the old format as well as
3000 * for extended context entries.
3002 * Bit 67 of the context entry is used. In the old
3003 * format this bit is available to software, in the
3004 * extended format it is the PGE bit, but PGE is ignored
3005 * by HW if PASIDs are disabled (and thus still available).
3008 * So disable PASIDs first and then mark the entry
3009 * copied. This means that we don't copy PASID
3010 * translations from the old kernel, but this is fine as
3011 * faults there are not fatal.
3013 context_clear_pasid_enable(&ce);
3014 context_set_copied(&ce);
3019 tbl[tbl_idx + pos] = new_ce;
3021 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3030 static int copy_translation_tables(struct intel_iommu *iommu)
3032 struct context_entry **ctxt_tbls;
3033 struct root_entry *old_rt;
3034 phys_addr_t old_rt_phys;
3035 int ctxt_table_entries;
3036 unsigned long flags;
3041 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3042 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3043 new_ext = !!ecap_ecs(iommu->ecap);
3046 * The RTT bit can only be changed when translation is disabled,
3047 * but disabling translation means to open a window for data
3048 * corruption. So bail out and don't copy anything if we would
3049 * have to change the bit.
3054 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3058 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3062 /* This is too big for the stack - allocate it from slab */
3063 ctxt_table_entries = ext ? 512 : 256;
3065 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3069 for (bus = 0; bus < 256; bus++) {
3070 ret = copy_context_table(iommu, &old_rt[bus],
3071 ctxt_tbls, bus, ext);
3073 pr_err("%s: Failed to copy context table for bus %d\n",
3079 spin_lock_irqsave(&iommu->lock, flags);
3081 /* Context tables are copied, now write them to the root_entry table */
3082 for (bus = 0; bus < 256; bus++) {
3083 int idx = ext ? bus * 2 : bus;
3086 if (ctxt_tbls[idx]) {
3087 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3088 iommu->root_entry[bus].lo = val;
3091 if (!ext || !ctxt_tbls[idx + 1])
3094 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3095 iommu->root_entry[bus].hi = val;
3098 spin_unlock_irqrestore(&iommu->lock, flags);
3102 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3112 #ifdef CONFIG_INTEL_IOMMU_SVM
3113 static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
3115 struct intel_iommu *iommu = data;
3119 return INVALID_IOASID;
3121 * The VT-d virtual command interface always uses the full 20-bit
3122 * PASID range. The host can partition the guest PASID range based
3123 * on policy, but that is outside the guest's control.
3125 if (min < PASID_MIN || max > intel_pasid_max_id)
3126 return INVALID_IOASID;
3128 if (vcmd_alloc_pasid(iommu, &ioasid))
3129 return INVALID_IOASID;
3134 static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
3136 struct intel_iommu *iommu = data;
3141 * The sanity check of the ioasid owner is done at an upper layer, e.g. VFIO.
3142 * We can only free the PASID when all the devices are unbound.
3144 if (ioasid_find(NULL, ioasid, NULL)) {
3145 pr_alert("Cannot free active IOASID %d\n", ioasid);
3148 vcmd_free_pasid(iommu, ioasid);
3151 static void register_pasid_allocator(struct intel_iommu *iommu)
3154 * If we are running in the host, there is no need for a custom
3155 * allocator, as PASIDs are allocated system-wide by the host.
3157 if (!cap_caching_mode(iommu->cap))
3160 if (!sm_supported(iommu)) {
3161 pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
3166 * Register a custom PASID allocator if we are running in a guest;
3167 * guest PASIDs must be obtained via the virtual command interface.
3168 * There can be multiple vIOMMUs in each guest but only one allocator
3169 * is active. All vIOMMU allocators will eventually call the same host allocator.
3172 if (!vccap_pasid(iommu->vccap))
3175 pr_info("Register custom PASID allocator\n");
3176 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
3177 iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
3178 iommu->pasid_allocator.pdata = (void *)iommu;
3179 if (ioasid_register_allocator(&iommu->pasid_allocator)) {
3180 pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
3182 * Disable scalable mode on this IOMMU if there
3183 * is no custom allocator. Mixing SM-capable vIOMMUs
3184 * and non-SM vIOMMUs is not supported.
3191 static int __init init_dmars(void)
3193 struct dmar_drhd_unit *drhd;
3194 struct intel_iommu *iommu;
3200 * initialize and program root entry to not present
3203 for_each_drhd_unit(drhd) {
3205 * No lock needed: this is only incremented in the single-
3206 * threaded kernel __init code path; all other accesses are read-only.
3209 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3213 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3216 /* Preallocate enough resources for IOMMU hot-addition */
3217 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3218 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3220 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3223 pr_err("Allocating global iommu array failed\n");
3228 ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
3232 for_each_iommu(iommu, drhd) {
3233 if (drhd->ignored) {
3234 iommu_disable_translation(iommu);
3239 * Find the max PASID size of all IOMMUs in the system.
3240 * We need to ensure the system PASID table is no bigger
3241 * than the smallest supported.
3243 if (pasid_supported(iommu)) {
3244 u32 temp = 2 << ecap_pss(iommu->ecap);
3246 intel_pasid_max_id = min_t(u32, temp,
3247 intel_pasid_max_id);
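/*
 * Worked example (illustrative): ecap_pss() reports the supported PASID
 * width minus one, so a full 20-bit implementation has pss == 19 and
 * temp = 2 << 19 == 1 << 20 == 1048576 PASIDs. intel_pasid_max_id thus
 * ends up as the smallest such value across all IOMMUs in the system.
 */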
3250 g_iommus[iommu->seq_id] = iommu;
3252 intel_iommu_init_qi(iommu);
3254 ret = iommu_init_domains(iommu);
3258 init_translation_status(iommu);
3260 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3261 iommu_disable_translation(iommu);
3262 clear_translation_pre_enabled(iommu);
3263 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3269 * we could share the same root & context tables
3270 * among all IOMMUs; need to split this later.
3272 ret = iommu_alloc_root_entry(iommu);
3276 if (translation_pre_enabled(iommu)) {
3277 pr_info("Translation already enabled - trying to copy translation structures\n");
3279 ret = copy_translation_tables(iommu);
3282 * We found the IOMMU with translation
3283 * enabled - but failed to copy over the
3284 * old root-entry table. Try to proceed
3285 * by disabling translation now and
3286 * allocating a clean root-entry table.
3287 * This might cause DMAR faults, but
3288 * probably the dump will still succeed.
3290 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3292 iommu_disable_translation(iommu);
3293 clear_translation_pre_enabled(iommu);
3295 pr_info("Copied translation tables from previous kernel for %s\n",
3300 if (!ecap_pass_through(iommu->ecap))
3301 hw_pass_through = 0;
3302 intel_svm_check(iommu);
3306 * Now that qi is enabled on all iommus, set the root entry and flush
3307 * caches. This is required on some Intel X58 chipsets, otherwise the
3308 * flush_context function will loop forever and the boot hangs.
3310 for_each_active_iommu(iommu, drhd) {
3311 iommu_flush_write_buffer(iommu);
3312 #ifdef CONFIG_INTEL_IOMMU_SVM
3313 register_pasid_allocator(iommu);
3315 iommu_set_root_entry(iommu);
3318 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3323 iommu_identity_mapping |= IDENTMAP_GFX;
3325 check_tylersburg_isoch();
3327 ret = si_domain_init(hw_pass_through);
3334 * global invalidate context cache
3335 * global invalidate iotlb
3336 * enable translation
3338 for_each_iommu(iommu, drhd) {
3339 if (drhd->ignored) {
3341 * we always have to disable PMRs or DMA may fail on this device.
3345 iommu_disable_protect_mem_regions(iommu);
3349 iommu_flush_write_buffer(iommu);
3351 #ifdef CONFIG_INTEL_IOMMU_SVM
3352 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3354 * Calling dmar_alloc_hwirq() with dmar_global_lock held
3355 * could cause a lock race, so drop the lock around the call.
3357 up_write(&dmar_global_lock);
3358 ret = intel_svm_enable_prq(iommu);
3359 down_write(&dmar_global_lock);
3364 ret = dmar_set_interrupt(iommu);
3372 for_each_active_iommu(iommu, drhd) {
3373 disable_dmar_iommu(iommu);
3374 free_dmar_iommu(iommu);
3383 static inline int iommu_domain_cache_init(void)
3387 iommu_domain_cache = kmem_cache_create("iommu_domain",
3388 sizeof(struct dmar_domain),
3393 if (!iommu_domain_cache) {
3394 pr_err("Couldn't create iommu_domain cache\n");
3401 static inline int iommu_devinfo_cache_init(void)
3405 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3406 sizeof(struct device_domain_info),
3410 if (!iommu_devinfo_cache) {
3411 pr_err("Couldn't create devinfo cache\n");
3418 static int __init iommu_init_mempool(void)
3421 ret = iova_cache_get();
3425 ret = iommu_domain_cache_init();
3429 ret = iommu_devinfo_cache_init();
3433 kmem_cache_destroy(iommu_domain_cache);
3440 static void __init iommu_exit_mempool(void)
3442 kmem_cache_destroy(iommu_devinfo_cache);
3443 kmem_cache_destroy(iommu_domain_cache);
3447 static void __init init_no_remapping_devices(void)
3449 struct dmar_drhd_unit *drhd;
3453 for_each_drhd_unit(drhd) {
3454 if (!drhd->include_all) {
3455 for_each_active_dev_scope(drhd->devices,
3456 drhd->devices_cnt, i, dev)
3458 /* ignore DMAR unit if no devices exist */
3459 if (i == drhd->devices_cnt)
3464 for_each_active_drhd_unit(drhd) {
3465 if (drhd->include_all)
3468 for_each_active_dev_scope(drhd->devices,
3469 drhd->devices_cnt, i, dev)
3470 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3472 if (i < drhd->devices_cnt)
3475 /* This IOMMU has *only* gfx devices. Either bypass it or
3476 set the gfx_mapped flag, as appropriate */
3477 drhd->gfx_dedicated = 1;
3483 #ifdef CONFIG_SUSPEND
3484 static int init_iommu_hw(void)
3486 struct dmar_drhd_unit *drhd;
3487 struct intel_iommu *iommu = NULL;
3489 for_each_active_iommu(iommu, drhd)
3491 dmar_reenable_qi(iommu);
3493 for_each_iommu(iommu, drhd) {
3494 if (drhd->ignored) {
3496 * we always have to disable PMRs or DMA may fail on this device.
3500 iommu_disable_protect_mem_regions(iommu);
3504 iommu_flush_write_buffer(iommu);
3505 iommu_set_root_entry(iommu);
3506 iommu_enable_translation(iommu);
3507 iommu_disable_protect_mem_regions(iommu);
3513 static void iommu_flush_all(void)
3515 struct dmar_drhd_unit *drhd;
3516 struct intel_iommu *iommu;
3518 for_each_active_iommu(iommu, drhd) {
3519 iommu->flush.flush_context(iommu, 0, 0, 0,
3520 DMA_CCMD_GLOBAL_INVL);
3521 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3522 DMA_TLB_GLOBAL_FLUSH);
3526 static int iommu_suspend(void)
3528 struct dmar_drhd_unit *drhd;
3529 struct intel_iommu *iommu = NULL;
3532 for_each_active_iommu(iommu, drhd) {
3533 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
3535 if (!iommu->iommu_state)
3541 for_each_active_iommu(iommu, drhd) {
3542 iommu_disable_translation(iommu);
3544 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3546 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3547 readl(iommu->reg + DMAR_FECTL_REG);
3548 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3549 readl(iommu->reg + DMAR_FEDATA_REG);
3550 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3551 readl(iommu->reg + DMAR_FEADDR_REG);
3552 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3553 readl(iommu->reg + DMAR_FEUADDR_REG);
3555 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3560 for_each_active_iommu(iommu, drhd)
3561 kfree(iommu->iommu_state);
3566 static void iommu_resume(void)
3568 struct dmar_drhd_unit *drhd;
3569 struct intel_iommu *iommu = NULL;
3572 if (init_iommu_hw()) {
3574 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3576 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3580 for_each_active_iommu(iommu, drhd) {
3582 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3584 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3585 iommu->reg + DMAR_FECTL_REG);
3586 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3587 iommu->reg + DMAR_FEDATA_REG);
3588 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3589 iommu->reg + DMAR_FEADDR_REG);
3590 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3591 iommu->reg + DMAR_FEUADDR_REG);
3593 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3596 for_each_active_iommu(iommu, drhd)
3597 kfree(iommu->iommu_state);
3600 static struct syscore_ops iommu_syscore_ops = {
3601 .resume = iommu_resume,
3602 .suspend = iommu_suspend,
3605 static void __init init_iommu_pm_ops(void)
3607 register_syscore_ops(&iommu_syscore_ops);
3611 static inline void init_iommu_pm_ops(void) {}
3612 #endif /* CONFIG_PM */
3614 static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
3616 if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
3617 !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
3618 rmrr->end_address <= rmrr->base_address ||
3619 arch_rmrr_sanity_check(rmrr))
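/*
 * Example of a range that passes these checks (illustrative addresses):
 * base_address = 0xdf800000 and end_address = 0xdf8fffff. The base is
 * 4KiB-aligned, end_address + 1 (0xdf900000) is 4KiB-aligned, and
 * end_address > base_address, so the RMRR is considered well-formed.
 */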
3625 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3627 struct acpi_dmar_reserved_memory *rmrr;
3628 struct dmar_rmrr_unit *rmrru;
3630 rmrr = (struct acpi_dmar_reserved_memory *)header;
3631 if (rmrr_sanity_check(rmrr)) {
3633 "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
3634 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
3635 rmrr->base_address, rmrr->end_address,
3636 dmi_get_system_info(DMI_BIOS_VENDOR),
3637 dmi_get_system_info(DMI_BIOS_VERSION),
3638 dmi_get_system_info(DMI_PRODUCT_VERSION));
3639 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
3642 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3646 rmrru->hdr = header;
3648 rmrru->base_address = rmrr->base_address;
3649 rmrru->end_address = rmrr->end_address;
3651 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3652 ((void *)rmrr) + rmrr->header.length,
3653 &rmrru->devices_cnt);
3654 if (rmrru->devices_cnt && rmrru->devices == NULL)
3657 list_add(&rmrru->list, &dmar_rmrr_units);
3666 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3668 struct dmar_atsr_unit *atsru;
3669 struct acpi_dmar_atsr *tmp;
3671 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
3673 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3674 if (atsr->segment != tmp->segment)
3676 if (atsr->header.length != tmp->header.length)
3678 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3685 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3687 struct acpi_dmar_atsr *atsr;
3688 struct dmar_atsr_unit *atsru;
3690 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
3693 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3694 atsru = dmar_find_atsr(atsr);
3698 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
3703 * If memory is allocated from slab by ACPI _DSM method, we need to
3704 * copy the memory content because the memory buffer will be freed on exit.
3707 atsru->hdr = (void *)(atsru + 1);
3708 memcpy(atsru->hdr, hdr, hdr->length);
3709 atsru->include_all = atsr->flags & 0x1;
3710 if (!atsru->include_all) {
3711 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3712 (void *)atsr + atsr->header.length,
3713 &atsru->devices_cnt);
3714 if (atsru->devices_cnt && atsru->devices == NULL) {
3720 list_add_rcu(&atsru->list, &dmar_atsr_units);
3725 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3727 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3731 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3733 struct acpi_dmar_atsr *atsr;
3734 struct dmar_atsr_unit *atsru;
3736 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3737 atsru = dmar_find_atsr(atsr);
3739 list_del_rcu(&atsru->list);
3741 intel_iommu_free_atsr(atsru);
3747 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3751 struct acpi_dmar_atsr *atsr;
3752 struct dmar_atsr_unit *atsru;
3754 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3755 atsru = dmar_find_atsr(atsr);
3759 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
3760 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3768 static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
3770 struct dmar_satc_unit *satcu;
3771 struct acpi_dmar_satc *tmp;
3773 list_for_each_entry_rcu(satcu, &dmar_satc_units, list,
3775 tmp = (struct acpi_dmar_satc *)satcu->hdr;
3776 if (satc->segment != tmp->segment)
3778 if (satc->header.length != tmp->header.length)
3780 if (memcmp(satc, tmp, satc->header.length) == 0)
3787 int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
3789 struct acpi_dmar_satc *satc;
3790 struct dmar_satc_unit *satcu;
3792 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
3795 satc = container_of(hdr, struct acpi_dmar_satc, header);
3796 satcu = dmar_find_satc(satc);
3800 satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL);
3804 satcu->hdr = (void *)(satcu + 1);
3805 memcpy(satcu->hdr, hdr, hdr->length);
3806 satcu->atc_required = satc->flags & 0x1;
3807 satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1),
3808 (void *)satc + satc->header.length,
3809 &satcu->devices_cnt);
3810 if (satcu->devices_cnt && !satcu->devices) {
3814 list_add_rcu(&satcu->list, &dmar_satc_units);
3819 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3822 struct intel_iommu *iommu = dmaru->iommu;
3824 if (g_iommus[iommu->seq_id])
3827 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
3831 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3832 pr_warn("%s: Doesn't support hardware pass through.\n",
3836 if (!ecap_sc_support(iommu->ecap) &&
3837 domain_update_iommu_snooping(iommu)) {
3838 pr_warn("%s: Doesn't support snooping.\n",
3842 sp = domain_update_iommu_superpage(NULL, iommu) - 1;
3843 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3844 pr_warn("%s: Doesn't support large page.\n",
3850 * Disable translation if already enabled prior to OS handover.
3852 if (iommu->gcmd & DMA_GCMD_TE)
3853 iommu_disable_translation(iommu);
3855 g_iommus[iommu->seq_id] = iommu;
3856 ret = iommu_init_domains(iommu);
3858 ret = iommu_alloc_root_entry(iommu);
3862 intel_svm_check(iommu);
3864 if (dmaru->ignored) {
3866 * we always have to disable PMRs or DMA may fail on this device
3869 iommu_disable_protect_mem_regions(iommu);
3873 intel_iommu_init_qi(iommu);
3874 iommu_flush_write_buffer(iommu);
3876 #ifdef CONFIG_INTEL_IOMMU_SVM
3877 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3878 ret = intel_svm_enable_prq(iommu);
3883 ret = dmar_set_interrupt(iommu);
3887 iommu_set_root_entry(iommu);
3888 iommu_enable_translation(iommu);
3890 iommu_disable_protect_mem_regions(iommu);
3894 disable_dmar_iommu(iommu);
3896 free_dmar_iommu(iommu);
3900 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3903 struct intel_iommu *iommu = dmaru->iommu;
3905 if (!intel_iommu_enabled)
3911 ret = intel_iommu_add(dmaru);
3913 disable_dmar_iommu(iommu);
3914 free_dmar_iommu(iommu);
3920 static void intel_iommu_free_dmars(void)
3922 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3923 struct dmar_atsr_unit *atsru, *atsr_n;
3924 struct dmar_satc_unit *satcu, *satc_n;
3926 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3927 list_del(&rmrru->list);
3928 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3932 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3933 list_del(&atsru->list);
3934 intel_iommu_free_atsr(atsru);
3936 list_for_each_entry_safe(satcu, satc_n, &dmar_satc_units, list) {
3937 list_del(&satcu->list);
3938 dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt);
3943 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3946 struct pci_bus *bus;
3947 struct pci_dev *bridge = NULL;
3949 struct acpi_dmar_atsr *atsr;
3950 struct dmar_atsr_unit *atsru;
3952 dev = pci_physfn(dev);
3953 for (bus = dev->bus; bus; bus = bus->parent) {
3955 /* If it's an integrated device, allow ATS */
3958 /* Connected via non-PCIe: no ATS */
3959 if (!pci_is_pcie(bridge) ||
3960 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3962 /* If we found the root port, look it up in the ATSR */
3963 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3968 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3969 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3970 if (atsr->segment != pci_domain_nr(dev->bus))
3973 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3974 if (tmp == &bridge->dev)
3977 if (atsru->include_all)
3987 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3990 struct dmar_rmrr_unit *rmrru;
3991 struct dmar_atsr_unit *atsru;
3992 struct dmar_satc_unit *satcu;
3993 struct acpi_dmar_atsr *atsr;
3994 struct acpi_dmar_reserved_memory *rmrr;
3995 struct acpi_dmar_satc *satc;
3997 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4000 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4001 rmrr = container_of(rmrru->hdr,
4002 struct acpi_dmar_reserved_memory, header);
4003 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4004 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4005 ((void *)rmrr) + rmrr->header.length,
4006 rmrr->segment, rmrru->devices,
4007 rmrru->devices_cnt);
4010 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4011 dmar_remove_dev_scope(info, rmrr->segment,
4012 rmrru->devices, rmrru->devices_cnt);
4016 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4017 if (atsru->include_all)
4020 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4021 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4022 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4023 (void *)atsr + atsr->header.length,
4024 atsr->segment, atsru->devices,
4025 atsru->devices_cnt);
4030 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4031 if (dmar_remove_dev_scope(info, atsr->segment,
4032 atsru->devices, atsru->devices_cnt))
4036 list_for_each_entry(satcu, &dmar_satc_units, list) {
4037 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
4038 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4039 ret = dmar_insert_dev_scope(info, (void *)(satc + 1),
4040 (void *)satc + satc->header.length,
4041 satc->segment, satcu->devices,
4042 satcu->devices_cnt);
4047 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4048 if (dmar_remove_dev_scope(info, satc->segment,
4049 satcu->devices, satcu->devices_cnt))
4057 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4058 unsigned long val, void *v)
4060 struct memory_notify *mhp = v;
4061 unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4062 unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
4066 case MEM_GOING_ONLINE:
4067 if (iommu_domain_identity_map(si_domain,
4068 start_vpfn, last_vpfn)) {
4069 pr_warn("Failed to build identity map for [%lx-%lx]\n",
4070 start_vpfn, last_vpfn);
4076 case MEM_CANCEL_ONLINE:
4078 struct dmar_drhd_unit *drhd;
4079 struct intel_iommu *iommu;
4080 struct page *freelist;
4082 freelist = domain_unmap(si_domain,
4083 start_vpfn, last_vpfn,
4087 for_each_active_iommu(iommu, drhd)
4088 iommu_flush_iotlb_psi(iommu, si_domain,
4089 start_vpfn, mhp->nr_pages,
4092 dma_free_pagelist(freelist);
4100 static struct notifier_block intel_iommu_memory_nb = {
4101 .notifier_call = intel_iommu_memory_notifier,
4105 static void intel_disable_iommus(void)
4107 struct intel_iommu *iommu = NULL;
4108 struct dmar_drhd_unit *drhd;
4110 for_each_iommu(iommu, drhd)
4111 iommu_disable_translation(iommu);
4114 void intel_iommu_shutdown(void)
4116 struct dmar_drhd_unit *drhd;
4117 struct intel_iommu *iommu = NULL;
4119 if (no_iommu || dmar_disabled)
4122 down_write(&dmar_global_lock);
4124 /* Disable PMRs explicitly here. */
4125 for_each_iommu(iommu, drhd)
4126 iommu_disable_protect_mem_regions(iommu);
4128 /* Make sure the IOMMUs are switched off */
4129 intel_disable_iommus();
4131 up_write(&dmar_global_lock);
4134 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4136 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4138 return container_of(iommu_dev, struct intel_iommu, iommu);
4141 static ssize_t intel_iommu_show_version(struct device *dev,
4142 struct device_attribute *attr,
4145 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4146 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4147 return sprintf(buf, "%d:%d\n",
4148 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4150 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4152 static ssize_t intel_iommu_show_address(struct device *dev,
4153 struct device_attribute *attr,
4156 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4157 return sprintf(buf, "%llx\n", iommu->reg_phys);
4159 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4161 static ssize_t intel_iommu_show_cap(struct device *dev,
4162 struct device_attribute *attr,
4165 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4166 return sprintf(buf, "%llx\n", iommu->cap);
4168 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4170 static ssize_t intel_iommu_show_ecap(struct device *dev,
4171 struct device_attribute *attr,
4174 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4175 return sprintf(buf, "%llx\n", iommu->ecap);
4177 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4179 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4180 struct device_attribute *attr,
4183 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4184 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4186 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4188 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4189 struct device_attribute *attr,
4192 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4193 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4194 cap_ndoms(iommu->cap)));
4196 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4198 static struct attribute *intel_iommu_attrs[] = {
4199 &dev_attr_version.attr,
4200 &dev_attr_address.attr,
4202 &dev_attr_ecap.attr,
4203 &dev_attr_domains_supported.attr,
4204 &dev_attr_domains_used.attr,
4208 static struct attribute_group intel_iommu_group = {
4209 .name = "intel-iommu",
4210 .attrs = intel_iommu_attrs,
4213 const struct attribute_group *intel_iommu_groups[] = {
4218 static inline bool has_external_pci(void)
4220 struct pci_dev *pdev = NULL;
4222 for_each_pci_dev(pdev)
4223 if (pdev->external_facing)
4229 static int __init platform_optin_force_iommu(void)
4231 if (!dmar_platform_optin() || no_platform_optin || !has_external_pci())
4234 if (no_iommu || dmar_disabled)
4235 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4238 * If Intel-IOMMU is disabled by default, we will apply identity
4239 * map for all devices except those marked as being untrusted.
4242 iommu_set_default_passthrough(false);
4250 static int __init probe_acpi_namespace_devices(void)
4252 struct dmar_drhd_unit *drhd;
4253 /* To avoid a -Wunused-but-set-variable warning. */
4254 struct intel_iommu *iommu __maybe_unused;
4258 for_each_active_iommu(iommu, drhd) {
4259 for_each_active_dev_scope(drhd->devices,
4260 drhd->devices_cnt, i, dev) {
4261 struct acpi_device_physical_node *pn;
4262 struct iommu_group *group;
4263 struct acpi_device *adev;
4265 if (dev->bus != &acpi_bus_type)
4268 adev = to_acpi_device(dev);
4269 mutex_lock(&adev->physical_node_lock);
4270 list_for_each_entry(pn,
4271 &adev->physical_node_list, node) {
4272 group = iommu_group_get(pn->dev);
4274 iommu_group_put(group);
4278 pn->dev->bus->iommu_ops = &intel_iommu_ops;
4279 ret = iommu_probe_device(pn->dev);
4283 mutex_unlock(&adev->physical_node_lock);
4293 int __init intel_iommu_init(void)
4296 struct dmar_drhd_unit *drhd;
4297 struct intel_iommu *iommu;
4300 * Intel IOMMU is required for a TXT/tboot launch or platform
4301 * opt in, so enforce that.
4303 force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
4304 platform_optin_force_iommu();
4306 if (iommu_init_mempool()) {
4308 panic("tboot: Failed to initialize iommu memory\n");
4312 down_write(&dmar_global_lock);
4313 if (dmar_table_init()) {
4315 panic("tboot: Failed to initialize DMAR table\n");
4319 if (dmar_dev_scope_init() < 0) {
4321 panic("tboot: Failed to initialize DMAR device scope\n");
4325 up_write(&dmar_global_lock);
4328 * The bus notifier takes the dmar_global_lock, so lockdep will
4329 * complain later when we register it under the lock.
4331 dmar_register_bus_notifier();
4333 down_write(&dmar_global_lock);
4336 intel_iommu_debugfs_init();
4338 if (no_iommu || dmar_disabled) {
4340 * We exit the function here to ensure IOMMU's remapping and
4341 * mempool aren't setup, which means that the IOMMU's PMRs
4342 * won't be disabled via the call to init_dmars(). So disable
4343 * it explicitly here. The PMRs were setup by tboot prior to
4344 * calling SENTER, but the kernel is expected to reset/tear down the PMRs.
4347 if (intel_iommu_tboot_noforce) {
4348 for_each_iommu(iommu, drhd)
4349 iommu_disable_protect_mem_regions(iommu);
4353 * Make sure the IOMMUs are switched off, even when we
4354 * boot into a kexec kernel and the previous kernel left them enabled.
4357 intel_disable_iommus();
4361 if (list_empty(&dmar_rmrr_units))
4362 pr_info("No RMRR found\n");
4364 if (list_empty(&dmar_atsr_units))
4365 pr_info("No ATSR found\n");
4367 if (list_empty(&dmar_satc_units))
4368 pr_info("No SATC found\n");
4371 intel_iommu_gfx_mapped = 1;
4373 init_no_remapping_devices();
4378 panic("tboot: Failed to initialize DMARs\n");
4379 pr_err("Initialization failed\n");
4382 up_write(&dmar_global_lock);
4384 init_iommu_pm_ops();
4386 down_read(&dmar_global_lock);
4387 for_each_active_iommu(iommu, drhd) {
4389 * The flush queue implementation does not perform
4390 * page-selective invalidations that are required for efficient
4391 * TLB flushes in virtual environments. The benefit of batching
4392 * is likely to be much lower than the overhead of synchronizing
4393 * the virtual and physical IOMMU page-tables.
4395 if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
4396 pr_warn("IOMMU batching is disabled due to virtualization");
4397 intel_iommu_strict = 1;
4399 iommu_device_sysfs_add(&iommu->iommu, NULL,
4402 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
4404 up_read(&dmar_global_lock);
4406 iommu_set_dma_strict(intel_iommu_strict);
4407 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4408 if (si_domain && !hw_pass_through)
4409 register_memory_notifier(&intel_iommu_memory_nb);
4411 down_read(&dmar_global_lock);
4412 if (probe_acpi_namespace_devices())
4413 pr_warn("ACPI name space devices didn't probe correctly\n");
4415 /* Finally, we enable the DMA remapping hardware. */
4416 for_each_iommu(iommu, drhd) {
4417 if (!drhd->ignored && !translation_pre_enabled(iommu))
4418 iommu_enable_translation(iommu);
4420 iommu_disable_protect_mem_regions(iommu);
4422 up_read(&dmar_global_lock);
4424 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4426 intel_iommu_enabled = 1;
4431 intel_iommu_free_dmars();
4432 up_write(&dmar_global_lock);
4433 iommu_exit_mempool();
4437 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4439 struct intel_iommu *iommu = opaque;
4441 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4446 * NB - intel-iommu lacks any sort of reference counting for the users of
4447 * dependent devices. If multiple endpoints have intersecting dependent
4448 * devices, unbinding the driver from any one of them will possibly leave
4449 * the others unable to operate.
4451 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4453 if (!iommu || !dev || !dev_is_pci(dev))
4456 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4459 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4461 struct dmar_domain *domain;
4462 struct intel_iommu *iommu;
4463 unsigned long flags;
4465 assert_spin_locked(&device_domain_lock);
4470 iommu = info->iommu;
4471 domain = info->domain;
4474 if (dev_is_pci(info->dev) && sm_supported(iommu))
4475 intel_pasid_tear_down_entry(iommu, info->dev,
4476 PASID_RID2PASID, false);
4478 iommu_disable_dev_iotlb(info);
4479 if (!dev_is_real_dma_subdevice(info->dev))
4480 domain_context_clear(iommu, info->dev);
4481 intel_pasid_free_table(info->dev);
4484 unlink_domain_info(info);
4486 spin_lock_irqsave(&iommu->lock, flags);
4487 domain_detach_iommu(domain, iommu);
4488 spin_unlock_irqrestore(&iommu->lock, flags);
4490 free_devinfo_mem(info);
4493 static void dmar_remove_one_dev_info(struct device *dev)
4495 struct device_domain_info *info;
4496 unsigned long flags;
4498 spin_lock_irqsave(&device_domain_lock, flags);
4499 info = get_domain_info(dev);
4501 __dmar_remove_one_dev_info(info);
4502 spin_unlock_irqrestore(&device_domain_lock, flags);
4505 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4509 /* calculate AGAW */
4510 domain->gaw = guest_width;
4511 adjust_width = guestwidth_to_adjustwidth(guest_width);
4512 domain->agaw = width_to_agaw(adjust_width);
4514 domain->iommu_coherency = 0;
4515 domain->iommu_snooping = 0;
4516 domain->iommu_superpage = 0;
4517 domain->max_addr = 0;
4519 /* always allocate the top pgd */
4520 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4523 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4527 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4529 struct dmar_domain *dmar_domain;
4530 struct iommu_domain *domain;
4533 case IOMMU_DOMAIN_DMA:
4534 case IOMMU_DOMAIN_UNMANAGED:
4535 dmar_domain = alloc_domain(0);
4537 pr_err("Can't allocate dmar_domain\n");
4540 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4541 pr_err("Domain initialization failed\n");
4542 domain_exit(dmar_domain);
4546 if (type == IOMMU_DOMAIN_DMA &&
4547 iommu_get_dma_cookie(&dmar_domain->domain))
4550 domain = &dmar_domain->domain;
4551 domain->geometry.aperture_start = 0;
4552 domain->geometry.aperture_end =
4553 __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4554 domain->geometry.force_aperture = true;
4557 case IOMMU_DOMAIN_IDENTITY:
4558 return &si_domain->domain;
4566 static void intel_iommu_domain_free(struct iommu_domain *domain)
4568 if (domain != &si_domain->domain)
4569 domain_exit(to_dmar_domain(domain));
4573 * Check whether a @domain could be attached to the @dev through the
4574 * aux-domain attach/detach APIs.
4577 is_aux_domain(struct device *dev, struct iommu_domain *domain)
4579 struct device_domain_info *info = get_domain_info(dev);
4581 return info && info->auxd_enabled &&
4582 domain->type == IOMMU_DOMAIN_UNMANAGED;
4585 static inline struct subdev_domain_info *
4586 lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
4588 struct subdev_domain_info *sinfo;
4590 if (!list_empty(&domain->subdevices)) {
4591 list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
4592 if (sinfo->pdev == dev)
4600 static int auxiliary_link_device(struct dmar_domain *domain,
4603 struct device_domain_info *info = get_domain_info(dev);
4604 struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
4606 assert_spin_locked(&device_domain_lock);
4611 sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
4614 sinfo->domain = domain;
4616 list_add(&sinfo->link_phys, &info->subdevices);
4617 list_add(&sinfo->link_domain, &domain->subdevices);
4620 return ++sinfo->users;
4623 static int auxiliary_unlink_device(struct dmar_domain *domain,
4626 struct device_domain_info *info = get_domain_info(dev);
4627 struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
4630 assert_spin_locked(&device_domain_lock);
4631 if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
4634 ret = --sinfo->users;
4636 list_del(&sinfo->link_phys);
4637 list_del(&sinfo->link_domain);
4644 static int aux_domain_add_dev(struct dmar_domain *domain,
4648 unsigned long flags;
4649 struct intel_iommu *iommu;
4651 iommu = device_to_iommu(dev, NULL, NULL);
4655 if (domain->default_pasid <= 0) {
4658 /* No private data needed for the default pasid */
4659 pasid = ioasid_alloc(NULL, PASID_MIN,
4660 pci_max_pasids(to_pci_dev(dev)) - 1,
4662 if (pasid == INVALID_IOASID) {
4663 pr_err("Can't allocate default pasid\n");
4666 domain->default_pasid = pasid;
4669 spin_lock_irqsave(&device_domain_lock, flags);
4670 ret = auxiliary_link_device(domain, dev);
4675 * Subdevices from the same physical device can be attached to the
4676 * same domain. For such cases, only the first subdevice attachment
4677 * needs to go through the full steps in this function. So if ret > 1, just goto out.
4684 * iommu->lock must be held to attach domain to iommu and setup the
4685 * pasid entry for second level translation.
4687 spin_lock(&iommu->lock);
4688 ret = domain_attach_iommu(domain, iommu);
4692 /* Setup the PASID entry for mediated devices: */
4693 if (domain_use_first_level(domain))
4694 ret = domain_setup_first_level(iommu, domain, dev,
4695 domain->default_pasid);
4697 ret = intel_pasid_setup_second_level(iommu, domain, dev,
4698 domain->default_pasid);
4702 spin_unlock(&iommu->lock);
4704 spin_unlock_irqrestore(&device_domain_lock, flags);
4709 domain_detach_iommu(domain, iommu);
4711 spin_unlock(&iommu->lock);
4712 auxiliary_unlink_device(domain, dev);
4714 spin_unlock_irqrestore(&device_domain_lock, flags);
4715 if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
4716 ioasid_put(domain->default_pasid);
4721 static void aux_domain_remove_dev(struct dmar_domain *domain,
4724 struct device_domain_info *info;
4725 struct intel_iommu *iommu;
4726 unsigned long flags;
4728 if (!is_aux_domain(dev, &domain->domain))
4731 spin_lock_irqsave(&device_domain_lock, flags);
4732 info = get_domain_info(dev);
4733 iommu = info->iommu;
4735 if (!auxiliary_unlink_device(domain, dev)) {
4736 spin_lock(&iommu->lock);
4737 intel_pasid_tear_down_entry(iommu, dev,
4738 domain->default_pasid, false);
4739 domain_detach_iommu(domain, iommu);
4740 spin_unlock(&iommu->lock);
4743 spin_unlock_irqrestore(&device_domain_lock, flags);
4745 if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
4746 ioasid_put(domain->default_pasid);
4749 static int prepare_domain_attach_device(struct iommu_domain *domain,
4752 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4753 struct intel_iommu *iommu;
4756 iommu = device_to_iommu(dev, NULL, NULL);
4760 /* check if this iommu agaw is sufficient for max mapped address */
4761 addr_width = agaw_to_width(iommu->agaw);
4762 if (addr_width > cap_mgaw(iommu->cap))
4763 addr_width = cap_mgaw(iommu->cap);
4765 if (dmar_domain->max_addr > (1LL << addr_width)) {
4766 dev_err(dev, "%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
4768 __func__, addr_width, dmar_domain->max_addr);
4771 dmar_domain->gaw = addr_width;
4774 * Knock out extra levels of page tables if necessary
4776 while (iommu->agaw < dmar_domain->agaw) {
4777 struct dma_pte *pte;
4779 pte = dmar_domain->pgd;
4780 if (dma_pte_present(pte)) {
4781 dmar_domain->pgd = (struct dma_pte *)
4782 phys_to_virt(dma_pte_addr(pte));
4783 free_pgtable_page(pte);
4785 dmar_domain->agaw--;
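/*
 * Illustrative walk-through (using agaw_to_width() as defined above,
 * width = 30 + agaw * 9): if the domain was built with agaw == 3
 * (57-bit, 5-level table) but this IOMMU only supports agaw == 2
 * (48-bit, 4-level), one loop iteration replaces dmar_domain->pgd with
 * the table referenced by its top-level entry and frees the old top
 * level, leaving a 4-level table that this IOMMU can walk.
 */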
4791 static int intel_iommu_attach_device(struct iommu_domain *domain,
4796 if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
4797 device_is_rmrr_locked(dev)) {
4798 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4802 if (is_aux_domain(dev, domain))
4805 /* normally dev is not mapped */
4806 if (unlikely(domain_context_mapped(dev))) {
4807 struct dmar_domain *old_domain;
4809 old_domain = find_domain(dev);
4811 dmar_remove_one_dev_info(dev);
4814 ret = prepare_domain_attach_device(domain, dev);
4818 return domain_add_dev_info(to_dmar_domain(domain), dev);
4821 static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
4826 if (!is_aux_domain(dev, domain))
4829 ret = prepare_domain_attach_device(domain, dev);
4833 return aux_domain_add_dev(to_dmar_domain(domain), dev);
4836 static void intel_iommu_detach_device(struct iommu_domain *domain,
4839 dmar_remove_one_dev_info(dev);
4842 static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
4845 aux_domain_remove_dev(to_dmar_domain(domain), dev);
4848 #ifdef CONFIG_INTEL_IOMMU_SVM
4850 * 2D array for converting and sanitizing IOMMU generic TLB granularity to
4851 * VT-d granularity. Invalidation is typically included in the unmap operation
4852 * as a result of a DMA or VFIO unmap. However, for assigned devices the
4853 * guest owns the first level page tables. Invalidations of translation caches in the
4854 * guest are trapped and passed down to the host.
4856 * vIOMMU in the guest will only expose first level page tables, therefore
4857 * we do not support IOTLB granularity for requests without PASID (second level).
4859 * For example, to find the VT-d granularity encoding for IOTLB
4860 * type and page selective granularity within PASID:
4861 * X: indexed by iommu cache type
4862 * Y: indexed by enum iommu_inv_granularity
4863 * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
4867 inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
4869 * PASID based IOTLB invalidation: PASID selective (per PASID),
4870 * page selective (address granularity)
4872 {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
4873 /* PASID based dev TLBs */
4874 {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
4876 {-EINVAL, -EINVAL, -EINVAL}
4879 static inline int to_vtd_granularity(int type, int granu)
4881 return inv_type_granu_table[type][granu];
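/*
 * Example lookup (illustrative, assuming the cache type and granularity
 * enums are ordered as the table comment above describes):
 * to_vtd_granularity(IOMMU_CACHE_INV_TYPE_IOTLB, IOMMU_INV_GRANU_ADDR)
 * picks row 0, column 2 of the table, i.e. QI_GRAN_PSI_PASID, while
 * unsupported combinations yield -EINVAL.
 */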
4884 static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
4886 u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
4888 /* VT-d size is encoded as 2^size of 4K pages: 0 for 4KiB, 9 for 2MiB, etc.
4889 * The IOMMU cache invalidate API passes granu_size in bytes, and the
4890 * number of granules of that size in contiguous memory. */
4892 return order_base_2(nr_pages);
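/*
 * Worked example (illustrative): granu_size = 4096 and nr_granules = 512
 * give nr_pages = (4096 * 512) >> 12 = 512, and order_base_2(512) = 9,
 * the VT-d encoding for a 2MiB invalidation. A non-power-of-two count
 * rounds up: 3 pages give order_base_2(3) = 2, covering 16KiB.
 */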
static int
intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
			   struct iommu_cache_invalidate_info *inv_info)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int cache_type;
	u8 bus, devfn;
	u16 did, sid;
	int ret = 0;
	u64 size = 0;

	if (!inv_info || !dmar_domain)
		return -EINVAL;

	if (!dev || !dev_is_pci(dev))
		return -ENODEV;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
		return -EINVAL;

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);
	info = get_domain_info(dev);
	if (!info) {
		ret = -EINVAL;
		goto out_unlock;
	}
	did = dmar_domain->iommu_did[iommu->seq_id];
	sid = PCI_DEVID(bus, devfn);

	/* Size is only valid in address selective invalidation */
	if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
		size = to_vtd_size(inv_info->granu.addr_info.granule_size,
				   inv_info->granu.addr_info.nb_granules);

	for_each_set_bit(cache_type,
			 (unsigned long *)&inv_info->cache,
			 IOMMU_CACHE_INV_TYPE_NR) {
		int granu = 0;
		u64 pasid = 0;
		u64 addr = 0;

		granu = to_vtd_granularity(cache_type, inv_info->granularity);
		if (granu == -EINVAL) {
			pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
					   cache_type, inv_info->granularity);
			break;
		}

		/*
		 * PASID is stored in different locations based on the
		 * granularity.
		 */
		if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
		    (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
			pasid = inv_info->granu.pasid_info.pasid;
		else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
			 (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
			pasid = inv_info->granu.addr_info.pasid;

		switch (BIT(cache_type)) {
		case IOMMU_CACHE_INV_TYPE_IOTLB:
			/* HW will ignore LSB bits based on address mask */
			if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
			    size &&
			    (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
				pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
						   inv_info->granu.addr_info.addr, size);
			}

			/*
			 * If granu is PASID-selective, address is ignored.
			 * We use npages = -1 to indicate that.
			 */
			qi_flush_piotlb(iommu, did, pasid,
					mm_to_dma_pfn(inv_info->granu.addr_info.addr),
					(granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
					inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);

			if (!info->ats_enabled)
				break;
			/*
			 * Always flush device IOTLB if ATS is enabled. vIOMMU
			 * in the guest may assume IOTLB flush is inclusive,
			 * which is more efficient.
			 */
			fallthrough;
		case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
			/*
			 * PASID based device TLB invalidation does not support
			 * IOMMU_INV_GRANU_PASID granularity but only supports
			 * IOMMU_INV_GRANU_ADDR.
			 * The equivalent of that is we set the size to be the
			 * entire range of 64 bit. User only provides PASID info
			 * without address info. So we set addr to 0.
			 */
			if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
				size = 64 - VTD_PAGE_SHIFT;
				addr = 0;
			} else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
				addr = inv_info->granu.addr_info.addr;
			}

			if (info->ats_enabled)
				qi_flush_dev_iotlb_pasid(iommu, sid,
						info->pfsid, pasid,
						info->ats_qdep, addr,
						size);
			else
				pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
			break;
		default:
			dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
					    cache_type);
			ret = -EINVAL;
		}
	}
out_unlock:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}
#endif

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot, gfp_t gfp)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				hpa >> VTD_PAGE_SHIFT, size, prot);
}

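/*
 * Illustrative sketch only (editor's example, not part of the driver):
 * the page rounding done by aligned_nrpages() above. An hpa that is not
 * page aligned can make a mapping straddle one more page than
 * size / VTD_PAGE_SIZE would suggest.
 */
static void __maybe_unused example_map_rounding(void)
{
	/* 4KiB starting at a 2KiB offset straddles two 4KiB pages */
	WARN_ON(aligned_nrpages(0x1800, 0x1000) != 2);
	/* a page-aligned 4KiB mapping needs exactly one page */
	WARN_ON(aligned_nrpages(0x2000, 0x1000) != 1);
}
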
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size,
				struct iommu_iotlb_gather *gather)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	unsigned long start_pfn, last_pfn;
	int level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	gather->freelist = domain_unmap(dmar_domain, start_pfn,
					last_pfn, gather->freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	iommu_iotlb_gather_add_page(domain, gather, iova, size);

	return size;
}

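/*
 * Illustrative sketch only (editor's example, not part of the driver):
 * the superpage rounding performed in intel_iommu_unmap(). With 9 bits
 * of page-table stride per level, a level-2 (2MiB) mapping forces the
 * reported size up to VTD_PAGE_SIZE << 9 even if the caller asked for
 * a single 4KiB page.
 */
static size_t __maybe_unused example_unmap_rounding(size_t size, int level)
{
	/* level 1 covers 4KiB PTEs, level 2 covers 2MiB, level 3 1GiB */
	if (size < (size_t)VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = (size_t)VTD_PAGE_SIZE << level_to_offset_bits(level);
	return size;
}
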
static void intel_iommu_tlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	unsigned long iova_pfn = IOVA_PFN(gather->start);
	size_t size = gather->end - gather->start;
	unsigned long start_pfn;
	unsigned long nrpages;
	int iommu_id;

	nrpages = aligned_nrpages(gather->start, size);
	start_pfn = mm_to_dma_pfn(iova_pfn);

	for_each_domain_iommu(iommu_id, dmar_domain)
		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
				      start_pfn, nrpages, !gather->freelist, 0);

	dma_free_pagelist(gather->freelist);
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte && dma_pte_present(pte))
		phys = dma_pte_addr(pte) +
			(iova & (BIT_MASK(level_to_offset_bits(level) +
						VTD_PAGE_SHIFT) - 1));

	return phys;
}

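/*
 * Worked example for the offset arithmetic above (editor's note): for
 * an IOVA mapped by a 2MiB superpage (level 2), level_to_offset_bits()
 * is 9, the mask is BIT_MASK(9 + 12) - 1 == 0x1fffff, and the result is
 * the superpage base address plus (iova & 0x1fffff).
 */
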
static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
	struct intel_iommu *iommu;

	iommu = device_to_iommu(dev, NULL, NULL);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	if (translation_pre_enabled(iommu))
		dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);

	return &iommu->iommu;
}

static void intel_iommu_release_device(struct device *dev)
{
	struct intel_iommu *iommu;

	iommu = device_to_iommu(dev, NULL, NULL);
	if (!iommu)
		return;

	dmar_remove_one_dev_info(dev);

	set_dma_ops(dev, NULL);
}

static void intel_iommu_probe_finalize(struct device *dev)
{
	dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);

	if (domain && domain->type == IOMMU_DOMAIN_DMA)
		iommu_setup_dma_ops(dev, base,
				    __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
	else
		set_dma_ops(dev, NULL);
}

static void intel_iommu_get_resv_regions(struct device *device,
					 struct list_head *head)
{
	int prot = DMA_PTE_READ | DMA_PTE_WRITE;
	struct iommu_resv_region *reg;
	struct dmar_rmrr_unit *rmrr;
	struct device *i_dev;
	int i;

	down_read(&dmar_global_lock);
	for_each_rmrr_units(rmrr) {
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, i_dev) {
			struct iommu_resv_region *resv;
			enum iommu_resv_type type;
			size_t length;

			if (i_dev != device &&
			    !is_downstream_to_pci_bridge(device, i_dev))
				continue;

			length = rmrr->end_address - rmrr->base_address + 1;

			type = device_rmrr_is_relaxable(device) ?
				IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;

			resv = iommu_alloc_resv_region(rmrr->base_address,
						       length, prot, type);
			if (!resv)
				break;

			list_add_tail(&resv->list, head);
		}
	}
	up_read(&dmar_global_lock);

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
	if (dev_is_pci(device)) {
		struct pci_dev *pdev = to_pci_dev(device);

		if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
			reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
						      IOMMU_RESV_DIRECT_RELAXABLE);
			if (reg)
				list_add_tail(&reg->list, head);
		}
	}
#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */

	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
				      0, IOMMU_RESV_MSI);
	if (!reg)
		return;
	list_add_tail(&reg->list, head);
}

int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
{
	struct device_domain_info *info;
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;

	domain = find_domain(dev);
	if (!domain)
		return -EINVAL;

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -EINVAL;
	info = get_domain_info(dev);
	if (!info || !info->pasid_supported)
		goto out;

	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
	if (WARN_ON(!context))
		goto out;

	ctx_lo = context[0].lo;

	if (!(ctx_lo & CONTEXT_PASIDE)) {
		ctx_lo |= CONTEXT_PASIDE;
		context[0].lo = ctx_lo;
		wmb();
		iommu->flush.flush_context(iommu,
					   domain->iommu_did[iommu->seq_id],
					   PCI_DEVID(info->bus, info->devfn),
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
	}

	/* Enable PASID support in the device, if it wasn't already */
	if (!info->pasid_enabled)
		iommu_enable_dev_iotlb(info);

	ret = 0;

 out:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

static struct iommu_group *intel_iommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	return generic_device_group(dev);
}

static int intel_iommu_enable_auxd(struct device *dev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int ret;

	iommu = device_to_iommu(dev, NULL, NULL);
	if (!iommu || dmar_disabled)
		return -EINVAL;

	if (!sm_supported(iommu) || !pasid_supported(iommu))
		return -EINVAL;

	ret = intel_iommu_enable_pasid(iommu, dev);
	if (ret)
		return -ENODEV;

	spin_lock_irqsave(&device_domain_lock, flags);
	info = get_domain_info(dev);
	info->auxd_enabled = 1;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int intel_iommu_disable_auxd(struct device *dev)
{
	struct device_domain_info *info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	info = get_domain_info(dev);
	if (!WARN_ON(!info))
		info->auxd_enabled = 0;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

/*
 * A PCI Express Designated Vendor-Specific Extended Capability is defined
 * in section 3.7 of the Intel Scalable I/O Virtualization technical spec
 * for system software and tools to detect endpoint devices supporting
 * Intel Scalable I/O Virtualization without a host driver dependency.
 *
 * Returns the address of the matching extended capability structure within
 * the device's PCI configuration space or 0 if the device does not support
 * it.
 */
static int siov_find_pci_dvsec(struct pci_dev *pdev)
{
	int pos;
	u16 vendor, id;

	pos = pci_find_next_ext_capability(pdev, 0, 0x23);
	while (pos) {
		/* DVSEC header 1 carries the vendor ID, header 2 the DVSEC ID */
		pci_read_config_word(pdev, pos + 4, &vendor);
		pci_read_config_word(pdev, pos + 8, &id);
		if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
			return pos;

		pos = pci_find_next_ext_capability(pdev, pos, 0x23);
	}

	return 0;
}

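/*
 * Illustrative sketch only (editor's example, not part of the driver):
 * the remaining fields of DVSEC header 1 at offset 4, per the PCIe
 * DVSEC layout -- revision in bits 19:16 and structure length in
 * bits 31:20.
 */
static void __maybe_unused example_dvsec_header1(struct pci_dev *pdev, int pos)
{
	u32 hdr1;

	if (pci_read_config_dword(pdev, pos + 4, &hdr1))
		return;
	pci_info(pdev, "DVSEC rev %u, length %u bytes\n",
		 (hdr1 >> 16) & 0xf, hdr1 >> 20);
}
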
static bool
intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
{
	struct device_domain_info *info = get_domain_info(dev);

	if (feat == IOMMU_DEV_FEAT_AUX) {
		int ret;

		if (!dev_is_pci(dev) || dmar_disabled ||
		    !scalable_mode_support() || !pasid_mode_support())
			return false;

		ret = pci_pasid_features(to_pci_dev(dev));
		if (ret < 0)
			return false;

		return !!siov_find_pci_dvsec(to_pci_dev(dev));
	}

	if (feat == IOMMU_DEV_FEAT_IOPF)
		return info && info->pri_supported;

	if (feat == IOMMU_DEV_FEAT_SVA)
		return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
			info->pasid_supported && info->pri_supported &&
			info->ats_supported;

	return false;
}

static int
intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat == IOMMU_DEV_FEAT_AUX)
		return intel_iommu_enable_auxd(dev);

	if (feat == IOMMU_DEV_FEAT_IOPF)
		return intel_iommu_dev_has_feat(dev, feat) ? 0 : -ENODEV;

	if (feat == IOMMU_DEV_FEAT_SVA) {
		struct device_domain_info *info = get_domain_info(dev);

		if (!info)
			return -EINVAL;

		if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
			return -EINVAL;

		if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
			return 0;
	}

	return -ENODEV;
}

static int
intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat == IOMMU_DEV_FEAT_AUX)
		return intel_iommu_disable_auxd(dev);

	return -ENODEV;
}

static bool
intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
{
	struct device_domain_info *info = get_domain_info(dev);

	if (feat == IOMMU_DEV_FEAT_AUX)
		return scalable_mode_support() && info && info->auxd_enabled;

	return false;
}

static int
intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);

	return dmar_domain->default_pasid > 0 ?
			dmar_domain->default_pasid : -EINVAL;
}

static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
					   struct device *dev)
{
	return attach_deferred(dev);
}

static int
intel_iommu_enable_nesting(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	unsigned long flags;
	int ret = -ENODEV;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (nested_mode_support() && list_empty(&dmar_domain->devices)) {
		dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
		dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
		ret = 0;
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

/*
 * Check that the device does not live on an external-facing PCI port that is
 * marked as untrusted. Such devices should not be able to apply quirks and
 * thus not be able to bypass the IOMMU restrictions.
 */
static bool risky_device(struct pci_dev *pdev)
{
	if (pdev->untrusted) {
		pci_info(pdev,
			 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
			 pdev->vendor, pdev->device);
		pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
		return true;
	}
	return false;
}

static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
			     unsigned long clf_pages)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	unsigned long lvl_pages = 0;
	int level = 0;

	while (clf_pages > 0) {
		if (!pte) {
			level = 0;
			pte = pfn_to_dma_pte(domain, clf_pfn, &level);
			if (WARN_ON(!pte))
				return;
			first_pte = pte;
			lvl_pages = lvl_to_nr_pages(level);
		}

		if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
			return;

		clf_pages -= lvl_pages;
		clf_pfn += lvl_pages;
		pte++;

		if (!clf_pages || first_pte_in_page(pte) ||
		    (level > 1 && clf_pages < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
	}
}

static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
				       unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	unsigned long pages = aligned_nrpages(iova, size);
	unsigned long pfn = iova >> VTD_PAGE_SHIFT;
	struct intel_iommu *iommu;
	int iommu_id;

	if (!dmar_domain->iommu_coherency)
		clflush_sync_map(dmar_domain, pfn, pages);

	for_each_domain_iommu(iommu_id, dmar_domain) {
		iommu = g_iommus[iommu_id];
		__mapping_notify_one(iommu, dmar_domain, pfn, pages);
	}
}

const struct iommu_ops intel_iommu_ops = {
	.capable		= intel_iommu_capable,
	.domain_alloc		= intel_iommu_domain_alloc,
	.domain_free		= intel_iommu_domain_free,
	.enable_nesting		= intel_iommu_enable_nesting,
	.attach_dev		= intel_iommu_attach_device,
	.detach_dev		= intel_iommu_detach_device,
	.aux_attach_dev		= intel_iommu_aux_attach_device,
	.aux_detach_dev		= intel_iommu_aux_detach_device,
	.aux_get_pasid		= intel_iommu_aux_get_pasid,
	.map			= intel_iommu_map,
	.iotlb_sync_map		= intel_iommu_iotlb_sync_map,
	.unmap			= intel_iommu_unmap,
	.flush_iotlb_all	= intel_flush_iotlb_all,
	.iotlb_sync		= intel_iommu_tlb_sync,
	.iova_to_phys		= intel_iommu_iova_to_phys,
	.probe_device		= intel_iommu_probe_device,
	.probe_finalize		= intel_iommu_probe_finalize,
	.release_device		= intel_iommu_release_device,
	.get_resv_regions	= intel_iommu_get_resv_regions,
	.put_resv_regions	= generic_iommu_put_resv_regions,
	.device_group		= intel_iommu_device_group,
	.dev_has_feat		= intel_iommu_dev_has_feat,
	.dev_feat_enabled	= intel_iommu_dev_feat_enabled,
	.dev_enable_feat	= intel_iommu_dev_enable_feat,
	.dev_disable_feat	= intel_iommu_dev_disable_feat,
	.is_attach_deferred	= intel_iommu_is_attach_deferred,
	.def_domain_type	= device_def_domain_type,
	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
#ifdef CONFIG_INTEL_IOMMU_SVM
	.cache_invalidate	= intel_iommu_sva_invalidate,
	.sva_bind_gpasid	= intel_svm_bind_gpasid,
	.sva_unbind_gpasid	= intel_svm_unbind_gpasid,
	.sva_bind		= intel_svm_bind,
	.sva_unbind		= intel_svm_unbind,
	.sva_get_pasid		= intel_svm_get_pasid,
	.page_response		= intel_svm_page_response,
#endif
};

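/*
 * Editor's note (sketch of the call path, not driver code): the core
 * IOMMU API dispatches through the table above. For example,
 * iommu_map() lands in intel_iommu_map(), iommu_unmap() in
 * intel_iommu_unmap(), and flushing the caller's iotlb_gather invokes
 * intel_iommu_tlb_sync().
 */
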
static void quirk_iommu_igfx(struct pci_dev *dev)
{
	if (risky_device(dev))
		return;

	pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

/* G4x/GM45 integrated gfx dmar support is totally busted. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
/* Broadwell igfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	if (risky_device(dev))
		return;

	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pci_info(dev, "Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

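/*
 * Illustrative sketch only (editor's example, not part of the driver):
 * decoding the GGC register read below. Bits 11:8 hold the stolen
 * graphics/GTT allocation; bit 11 (GGC_MEMORY_VT_ENABLED) is set only
 * for the VT-aware encodings 0x9..0xb, so e.g. GGC_MEMORY_SIZE_2M
 * (0x3 << 8) means the BIOS allocated no shadow GTT.
 */
static bool __maybe_unused example_ggc_has_shadow_gtt(u16 ggc)
{
	return !!(ggc & GGC_MEMORY_VT_ENABLED);
}
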
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (risky_device(dev))
		return;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
{
	unsigned short ver;

	if (!IS_GFX_DEVICE(dev))
		return;

	ver = (dev->device >> 8) & 0xff;
	if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
	    ver != 0x4e && ver != 0x8a && ver != 0x98 &&
	    ver != 0x9a)
		return;

	if (risky_device(dev))
		return;

	pci_info(dev, "Skip IOMMU disabling for graphics\n");
	iommu_skip_te_disable = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);

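/*
 * Editor's note (worked example): the quirk above keys off the high
 * byte of the PCI device ID, e.g. a Tiger Lake IGD with device ID
 * 0x9a49 yields ver == 0x9a and matches the list.
 */
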
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;

	if (risky_device(pdev)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (risky_device(pdev)) {
		pci_dev_put(pdev);
		return;
	}

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",