// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */
13 #define pr_fmt(fmt) "DMAR: " fmt
14 #define dev_fmt(fmt) pr_fmt(fmt)
16 #include <linux/init.h>
17 #include <linux/bitmap.h>
18 #include <linux/debugfs.h>
19 #include <linux/export.h>
20 #include <linux/slab.h>
21 #include <linux/irq.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/pci.h>
25 #include <linux/dmar.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/mempool.h>
28 #include <linux/memory.h>
29 #include <linux/cpu.h>
30 #include <linux/timer.h>
32 #include <linux/iova.h>
33 #include <linux/iommu.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/syscore_ops.h>
36 #include <linux/tboot.h>
37 #include <linux/dmi.h>
38 #include <linux/pci-ats.h>
39 #include <linux/memblock.h>
40 #include <linux/dma-contiguous.h>
41 #include <linux/dma-direct.h>
42 #include <linux/crash_dump.h>
43 #include <linux/numa.h>
44 #include <linux/swiotlb.h>
45 #include <asm/irq_remapping.h>
46 #include <asm/cacheflush.h>
47 #include <asm/iommu.h>
48 #include <trace/events/intel_iommu.h>
50 #include "../irq_remapping.h"
53 #define ROOT_SIZE VTD_PAGE_SIZE
54 #define CONTEXT_SIZE VTD_PAGE_SIZE
56 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
57 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
58 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
59 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
61 #define IOAPIC_RANGE_START (0xfee00000)
62 #define IOAPIC_RANGE_END (0xfeefffff)
63 #define IOVA_START_ADDR (0x1000)
65 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
67 #define MAX_AGAW_WIDTH 64
68 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
70 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
71 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
73 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
74 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
75 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
76 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
77 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
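/*
 * Illustrative worked example (not from the original source): with a
 * 48-bit guest address width and 4KiB VT-d pages (VTD_PAGE_SHIFT == 12),
 *
 *	__DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1
 *	DOMAIN_MAX_ADDR(48)  == ((1ULL << 36) - 1) << 12 == 0xfffffffff000ULL
 *
 * i.e. DOMAIN_MAX_ADDR() is the address of the last 4KiB page in the
 * domain, not the last byte of the 48-bit address space. The min_t()
 * clamp in DOMAIN_MAX_PFN() only matters where unsigned long is 32 bits.
 */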
79 /* IO virtual address start page frame number */
80 #define IOVA_START_PFN (1)
82 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
84 /* page table handling */
85 #define LEVEL_STRIDE (9)
86 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
106 static inline int agaw_to_level(int agaw)
111 static inline int agaw_to_width(int agaw)
113 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
116 static inline int width_to_agaw(int width)
118 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
121 static inline unsigned int level_to_offset_bits(int level)
123 return (level - 1) * LEVEL_STRIDE;
126 static inline int pfn_level_offset(u64 pfn, int level)
128 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
131 static inline u64 level_mask(int level)
133 return -1ULL << level_to_offset_bits(level);
136 static inline u64 level_size(int level)
138 return 1ULL << level_to_offset_bits(level);
141 static inline u64 align_to_level(u64 pfn, int level)
143 return (pfn + level_size(level) - 1) & level_mask(level);
146 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
148 return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
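/*
 * Illustrative worked example (not from the original source), assuming
 * the usual agaw_to_level() mapping of agaw N to an (N + 2)-level page
 * table: a 48-bit address width gives width_to_agaw(48) == 2 and
 * agaw_to_width(2) == 48. Each level then decodes 9 bits of the DMA pfn:
 *
 *	level_to_offset_bits(1) == 0,  level_size(1) == 1      (4KiB)
 *	level_to_offset_bits(2) == 9,  level_size(2) == 512    (2MiB)
 *	level_to_offset_bits(3) == 18, level_size(3) == 262144 (1GiB)
 *
 * For DMA pfn 0x12345, pfn_level_offset() yields 0x145, 0x91 and 0x0 at
 * levels 1, 2 and 3 respectively, and align_to_level(0x12345, 2) rounds
 * up to 0x12400.
 */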
151 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
152 are never going to work. */
153 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
155 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
158 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
160 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
162 static inline unsigned long page_to_dma_pfn(struct page *pg)
164 return mm_to_dma_pfn(page_to_pfn(pg));
166 static inline unsigned long virt_to_dma_pfn(void *p)
168 return page_to_dma_pfn(virt_to_page(p));
171 /* global iommu list, set NULL for ignored DMAR units */
172 static struct intel_iommu **g_iommus;
174 static void __init check_tylersburg_isoch(void);
175 static int rwbf_quirk;
/*
 * Set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT).
 */
static int force_on = 0;
182 int intel_iommu_tboot_noforce;
183 static int no_platform_optin;
185 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
188 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
191 static phys_addr_t root_entry_lctp(struct root_entry *re)
196 return re->lo & VTD_PAGE_MASK;
200 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
203 static phys_addr_t root_entry_uctp(struct root_entry *re)
208 return re->hi & VTD_PAGE_MASK;
211 static inline void context_clear_pasid_enable(struct context_entry *context)
213 context->lo &= ~(1ULL << 11);
216 static inline bool context_pasid_enabled(struct context_entry *context)
218 return !!(context->lo & (1ULL << 11));
221 static inline void context_set_copied(struct context_entry *context)
223 context->hi |= (1ull << 3);
226 static inline bool context_copied(struct context_entry *context)
228 return !!(context->hi & (1ULL << 3));
231 static inline bool __context_present(struct context_entry *context)
233 return (context->lo & 1);
236 bool context_present(struct context_entry *context)
238 return context_pasid_enabled(context) ?
239 __context_present(context) :
240 __context_present(context) && !context_copied(context);
243 static inline void context_set_present(struct context_entry *context)
248 static inline void context_set_fault_enable(struct context_entry *context)
250 context->lo &= (((u64)-1) << 2) | 1;
253 static inline void context_set_translation_type(struct context_entry *context,
256 context->lo &= (((u64)-1) << 4) | 3;
257 context->lo |= (value & 3) << 2;
260 static inline void context_set_address_root(struct context_entry *context,
263 context->lo &= ~VTD_PAGE_MASK;
264 context->lo |= value & VTD_PAGE_MASK;
267 static inline void context_set_address_width(struct context_entry *context,
270 context->hi |= value & 7;
273 static inline void context_set_domain_id(struct context_entry *context,
276 context->hi |= (value & ((1 << 16) - 1)) << 8;
279 static inline int context_domain_id(struct context_entry *c)
281 return((c->hi >> 8) & 0xffff);
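/*
 * Illustrative sketch (not part of the driver) of how the helpers above
 * compose a legacy-mode context entry: a 4-level (agaw == 2) second-level
 * page table at physical address 0x12345000, domain-id 5, and multi-level
 * translation (CONTEXT_TT_MULTI_LEVEL, assumed here to encode as 0).
 * context_set_present() is taken to set bit 0 of the low qword, matching
 * __context_present() above.
 *
 *	struct context_entry ce = {};
 *
 *	context_set_domain_id(&ce, 5);			// hi |= 5 << 8
 *	context_set_address_width(&ce, 2);		// hi |= 2
 *	context_set_address_root(&ce, 0x12345000);	// lo bits 63:12
 *	context_set_translation_type(&ce, 0);		// lo bits 3:2
 *	context_set_fault_enable(&ce);			// clear lo bit 1
 *	context_set_present(&ce);			// lo bit 0
 *
 * leaves ce.lo == 0x12345001 and ce.hi == 0x0502, which context_present()
 * and context_domain_id() read back as expected.
 */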
284 static inline void context_clear_entry(struct context_entry *context)
/*
 * This domain is a static identity mapping domain.
 * 1. This domain creates a static 1:1 mapping to all usable memory.
 * 2. It maps to each iommu if successful.
 * 3. Each iommu maps to this domain if successful.
 */
296 static struct dmar_domain *si_domain;
297 static int hw_pass_through = 1;
299 #define for_each_domain_iommu(idx, domain) \
300 for (idx = 0; idx < g_num_of_iommus; idx++) \
301 if (domain->iommu_refcnt[idx])
303 struct dmar_rmrr_unit {
304 struct list_head list; /* list of rmrr units */
305 struct acpi_dmar_header *hdr; /* ACPI header */
306 u64 base_address; /* reserved base address*/
307 u64 end_address; /* reserved end address */
308 struct dmar_dev_scope *devices; /* target devices */
309 int devices_cnt; /* target device count */
312 struct dmar_atsr_unit {
313 struct list_head list; /* list of ATSR units */
314 struct acpi_dmar_header *hdr; /* ACPI header */
315 struct dmar_dev_scope *devices; /* target devices */
316 int devices_cnt; /* target device count */
317 u8 include_all:1; /* include all ports */
320 static LIST_HEAD(dmar_atsr_units);
321 static LIST_HEAD(dmar_rmrr_units);
323 #define for_each_rmrr_units(rmrr) \
324 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
326 /* bitmap for indexing intel_iommus */
327 static int g_num_of_iommus;
329 static void domain_exit(struct dmar_domain *domain);
330 static void domain_remove_dev_info(struct dmar_domain *domain);
331 static void dmar_remove_one_dev_info(struct device *dev);
332 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
333 static int intel_iommu_attach_device(struct iommu_domain *domain,
335 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
338 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
342 #endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
344 #ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
int intel_iommu_sm = 1;
#else
int intel_iommu_sm;
348 #endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
350 int intel_iommu_enabled = 0;
351 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
353 static int dmar_map_gfx = 1;
354 static int dmar_forcedac;
355 static int intel_iommu_strict;
356 static int intel_iommu_superpage = 1;
357 static int iommu_identity_mapping;
358 static int intel_no_bounce;
359 static int iommu_skip_te_disable;
361 #define IDENTMAP_GFX 2
362 #define IDENTMAP_AZALIA 4
364 int intel_iommu_gfx_mapped;
365 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
367 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
368 struct device_domain_info *get_domain_info(struct device *dev)
370 struct device_domain_info *info;
375 info = dev_iommu_priv_get(dev);
376 if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
382 DEFINE_SPINLOCK(device_domain_lock);
383 static LIST_HEAD(device_domain_list);
385 #define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) && \
386 to_pci_dev(d)->untrusted)
389 * Iterate over elements in device_domain_list and call the specified
390 * callback @fn against each element.
392 int for_each_device_domain(int (*fn)(struct device_domain_info *info,
393 void *data), void *data)
397 struct device_domain_info *info;
399 spin_lock_irqsave(&device_domain_lock, flags);
400 list_for_each_entry(info, &device_domain_list, global) {
401 ret = fn(info, data);
403 spin_unlock_irqrestore(&device_domain_lock, flags);
407 spin_unlock_irqrestore(&device_domain_lock, flags);
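/*
 * Illustrative usage sketch (hypothetical callback, not part of the
 * driver): count the entries on device_domain_list. A non-zero return
 * from the callback stops the walk and is propagated to the caller.
 *
 *	static int count_one(struct device_domain_info *info, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	for_each_device_domain(count_one, &count);
 */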
412 const struct iommu_ops intel_iommu_ops;
414 static bool translation_pre_enabled(struct intel_iommu *iommu)
416 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
419 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
421 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
424 static void init_translation_status(struct intel_iommu *iommu)
428 gsts = readl(iommu->reg + DMAR_GSTS_REG);
429 if (gsts & DMA_GSTS_TES)
430 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
433 static int __init intel_iommu_setup(char *str)
438 if (!strncmp(str, "on", 2)) {
440 pr_info("IOMMU enabled\n");
441 } else if (!strncmp(str, "off", 3)) {
443 no_platform_optin = 1;
444 pr_info("IOMMU disabled\n");
445 } else if (!strncmp(str, "igfx_off", 8)) {
447 pr_info("Disable GFX device mapping\n");
448 } else if (!strncmp(str, "forcedac", 8)) {
449 pr_info("Forcing DAC for PCI devices\n");
451 } else if (!strncmp(str, "strict", 6)) {
452 pr_info("Disable batched IOTLB flush\n");
453 intel_iommu_strict = 1;
454 } else if (!strncmp(str, "sp_off", 6)) {
455 pr_info("Disable supported super page\n");
456 intel_iommu_superpage = 0;
457 } else if (!strncmp(str, "sm_on", 5)) {
458 pr_info("Intel-IOMMU: scalable mode supported\n");
460 } else if (!strncmp(str, "tboot_noforce", 13)) {
461 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
462 intel_iommu_tboot_noforce = 1;
463 } else if (!strncmp(str, "nobounce", 8)) {
464 pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
468 str += strcspn(str, ",");
474 __setup("intel_iommu=", intel_iommu_setup);
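/*
 * Illustrative example (not from the original source): options are comma
 * separated on the kernel command line, e.g.
 *
 *	intel_iommu=on,sm_on,strict
 *
 * enables the IOMMU, opts in to scalable mode and disables batched IOTLB
 * flushing.
 */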
476 static struct kmem_cache *iommu_domain_cache;
477 static struct kmem_cache *iommu_devinfo_cache;
479 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
481 struct dmar_domain **domains;
484 domains = iommu->domains[idx];
488 return domains[did & 0xff];
491 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
492 struct dmar_domain *domain)
494 struct dmar_domain **domains;
497 if (!iommu->domains[idx]) {
498 size_t size = 256 * sizeof(struct dmar_domain *);
499 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
502 domains = iommu->domains[idx];
503 if (WARN_ON(!domains))
506 domains[did & 0xff] = domain;
509 void *alloc_pgtable_page(int node)
514 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
516 vaddr = page_address(page);
520 void free_pgtable_page(void *vaddr)
522 free_page((unsigned long)vaddr);
525 static inline void *alloc_domain_mem(void)
527 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
530 static void free_domain_mem(void *vaddr)
532 kmem_cache_free(iommu_domain_cache, vaddr);
535 static inline void * alloc_devinfo_mem(void)
537 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
540 static inline void free_devinfo_mem(void *vaddr)
542 kmem_cache_free(iommu_devinfo_cache, vaddr);
545 static inline int domain_type_is_si(struct dmar_domain *domain)
547 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
550 static inline bool domain_use_first_level(struct dmar_domain *domain)
552 return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
555 static inline int domain_pfn_supported(struct dmar_domain *domain,
558 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
560 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
563 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
568 sagaw = cap_sagaw(iommu->cap);
569 for (agaw = width_to_agaw(max_gaw);
571 if (test_bit(agaw, &sagaw))
579 * Calculate max SAGAW for each iommu.
581 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
583 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
/*
 * Calculate the agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
591 int iommu_calculate_agaw(struct intel_iommu *iommu)
593 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
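/*
 * Illustrative worked example (not from the original source): if
 * cap_sagaw() reports only bit 2 set (4-level tables), then
 * iommu_calculate_agaw() starts at width_to_agaw(57) == 3, finds bit 3
 * clear, and settles on agaw == 2, i.e. agaw_to_width(2) == 48 bits of
 * usable guest address width for second-level translation.
 */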
/* This function only returns a single iommu in a domain */
597 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
601 /* si_domain and vm domain should not get here. */
602 if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
605 for_each_domain_iommu(iommu_id, domain)
608 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
611 return g_iommus[iommu_id];
614 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
616 return sm_supported(iommu) ?
617 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
620 static void domain_update_iommu_coherency(struct dmar_domain *domain)
622 struct dmar_drhd_unit *drhd;
623 struct intel_iommu *iommu;
627 domain->iommu_coherency = 1;
629 for_each_domain_iommu(i, domain) {
631 if (!iommu_paging_structure_coherency(g_iommus[i])) {
632 domain->iommu_coherency = 0;
639 /* No hardware attached; use lowest common denominator */
641 for_each_active_iommu(iommu, drhd) {
642 if (!iommu_paging_structure_coherency(iommu)) {
643 domain->iommu_coherency = 0;
650 static int domain_update_iommu_snooping(struct intel_iommu *skip)
652 struct dmar_drhd_unit *drhd;
653 struct intel_iommu *iommu;
657 for_each_active_iommu(iommu, drhd) {
659 if (!ecap_sc_support(iommu->ecap)) {
670 static int domain_update_iommu_superpage(struct dmar_domain *domain,
671 struct intel_iommu *skip)
673 struct dmar_drhd_unit *drhd;
674 struct intel_iommu *iommu;
677 if (!intel_iommu_superpage) {
681 /* set iommu_superpage to the smallest common denominator */
683 for_each_active_iommu(iommu, drhd) {
685 if (domain && domain_use_first_level(domain)) {
686 if (!cap_fl1gp_support(iommu->cap))
689 mask &= cap_super_page_val(iommu->cap);
701 static int domain_update_device_node(struct dmar_domain *domain)
703 struct device_domain_info *info;
704 int nid = NUMA_NO_NODE;
706 assert_spin_locked(&device_domain_lock);
708 if (list_empty(&domain->devices))
711 list_for_each_entry(info, &domain->devices, link) {
/*
 * There could possibly be multiple device numa nodes as devices
 * within the same domain may sit behind different IOMMUs. There
 * isn't a perfect answer in such a situation, so we select the
 * first-come-first-served policy.
 */
721 nid = dev_to_node(info->dev);
722 if (nid != NUMA_NO_NODE)
729 /* Some capabilities may be different across iommus */
730 static void domain_update_iommu_cap(struct dmar_domain *domain)
732 domain_update_iommu_coherency(domain);
733 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
734 domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
/*
 * If RHSA is missing, we should default to the device numa domain
 * as fall back.
 */
740 if (domain->nid == NUMA_NO_NODE)
741 domain->nid = domain_update_device_node(domain);
744 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
747 struct root_entry *root = &iommu->root_entry[bus];
748 struct context_entry *context;
752 if (sm_supported(iommu)) {
760 context = phys_to_virt(*entry & VTD_PAGE_MASK);
762 unsigned long phy_addr;
766 context = alloc_pgtable_page(iommu->node);
770 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
771 phy_addr = virt_to_phys((void *)context);
772 *entry = phy_addr | 1;
773 __iommu_flush_cache(iommu, entry, sizeof(*entry));
775 return &context[devfn];
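/*
 * Illustrative worked example (not from the original source): for PCI
 * device 0000:00:1f.3, bus == 0x00 and devfn == PCI_DEVFN(0x1f, 3) ==
 * 0xfb. In legacy mode, root_entry[0x00] points to a 256-entry context
 * table and &context[devfn] is the entry programmed for the device. With
 * scalable mode (sm_supported()), each root entry instead carries a lower
 * and an upper context-table pointer (root_entry_lctp()/root_entry_uctp())
 * covering devfn 0x00-0x7f and 0x80-0xff respectively, which is why
 * free_context_table() also looks up devfn 0x80 for each bus.
 */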
778 static bool attach_deferred(struct device *dev)
780 return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
784 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
785 * sub-hierarchy of a candidate PCI-PCI bridge
786 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
787 * @bridge: the candidate PCI-PCI bridge
789 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
792 is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
794 struct pci_dev *pdev, *pbridge;
796 if (!dev_is_pci(dev) || !dev_is_pci(bridge))
799 pdev = to_pci_dev(dev);
800 pbridge = to_pci_dev(bridge);
802 if (pbridge->subordinate &&
803 pbridge->subordinate->number <= pdev->bus->number &&
804 pbridge->subordinate->busn_res.end >= pdev->bus->number)
810 static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
812 struct dmar_drhd_unit *drhd;
816 /* We know that this device on this chipset has its own IOMMU.
817 * If we find it under a different IOMMU, then the BIOS is lying
818 * to us. Hope that the IOMMU for this device is actually
819 * disabled, and it needs no translation...
821 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
824 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
/* we know that this iommu should be at offset 0xa000 from vtbar */
830 drhd = dmar_find_matched_drhd_unit(pdev);
831 if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
832 pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
833 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
840 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
842 if (!iommu || iommu->drhd->ignored)
845 if (dev_is_pci(dev)) {
846 struct pci_dev *pdev = to_pci_dev(dev);
848 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
849 pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
850 quirk_ioat_snb_local_iommu(pdev))
857 struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
859 struct dmar_drhd_unit *drhd = NULL;
860 struct pci_dev *pdev = NULL;
861 struct intel_iommu *iommu;
869 if (dev_is_pci(dev)) {
870 struct pci_dev *pf_pdev;
872 pdev = pci_real_dma_dev(to_pci_dev(dev));
874 /* VFs aren't listed in scope tables; we need to look up
875 * the PF instead to find the IOMMU. */
876 pf_pdev = pci_physfn(pdev);
878 segment = pci_domain_nr(pdev->bus);
879 } else if (has_acpi_companion(dev))
880 dev = &ACPI_COMPANION(dev)->dev;
883 for_each_iommu(iommu, drhd) {
884 if (pdev && segment != drhd->segment)
887 for_each_active_dev_scope(drhd->devices,
888 drhd->devices_cnt, i, tmp) {
890 /* For a VF use its original BDF# not that of the PF
891 * which we used for the IOMMU lookup. Strictly speaking
892 * we could do this for all PCI devices; we only need to
893 * get the BDF# from the scope table for ACPI matches. */
894 if (pdev && pdev->is_virtfn)
898 *bus = drhd->devices[i].bus;
899 *devfn = drhd->devices[i].devfn;
904 if (is_downstream_to_pci_bridge(dev, tmp))
908 if (pdev && drhd->include_all) {
911 *bus = pdev->bus->number;
912 *devfn = pdev->devfn;
919 if (iommu_is_dummy(iommu, dev))
927 static void domain_flush_cache(struct dmar_domain *domain,
928 void *addr, int size)
930 if (!domain->iommu_coherency)
931 clflush_cache_range(addr, size);
934 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
936 struct context_entry *context;
940 spin_lock_irqsave(&iommu->lock, flags);
941 context = iommu_context_addr(iommu, bus, devfn, 0);
943 ret = context_present(context);
944 spin_unlock_irqrestore(&iommu->lock, flags);
948 static void free_context_table(struct intel_iommu *iommu)
952 struct context_entry *context;
954 spin_lock_irqsave(&iommu->lock, flags);
955 if (!iommu->root_entry) {
958 for (i = 0; i < ROOT_ENTRY_NR; i++) {
959 context = iommu_context_addr(iommu, i, 0, 0);
961 free_pgtable_page(context);
963 if (!sm_supported(iommu))
966 context = iommu_context_addr(iommu, i, 0x80, 0);
968 free_pgtable_page(context);
971 free_pgtable_page(iommu->root_entry);
972 iommu->root_entry = NULL;
974 spin_unlock_irqrestore(&iommu->lock, flags);
977 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
978 unsigned long pfn, int *target_level)
980 struct dma_pte *parent, *pte;
981 int level = agaw_to_level(domain->agaw);
984 BUG_ON(!domain->pgd);
986 if (!domain_pfn_supported(domain, pfn))
987 /* Address beyond IOMMU's addressing capabilities. */
990 parent = domain->pgd;
995 offset = pfn_level_offset(pfn, level);
996 pte = &parent[offset];
997 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
999 if (level == *target_level)
1002 if (!dma_pte_present(pte)) {
1005 tmp_page = alloc_pgtable_page(domain->nid);
1010 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1011 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1012 if (domain_use_first_level(domain))
1013 pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
1014 if (cmpxchg64(&pte->val, 0ULL, pteval))
1015 /* Someone else set it while we were thinking; use theirs. */
1016 free_pgtable_page(tmp_page);
1018 domain_flush_cache(domain, pte, sizeof(*pte));
1023 parent = phys_to_virt(dma_pte_addr(pte));
1028 *target_level = level;
1033 /* return address's pte at specific level */
1034 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1036 int level, int *large_page)
1038 struct dma_pte *parent, *pte;
1039 int total = agaw_to_level(domain->agaw);
1042 parent = domain->pgd;
1043 while (level <= total) {
1044 offset = pfn_level_offset(pfn, total);
1045 pte = &parent[offset];
1049 if (!dma_pte_present(pte)) {
1050 *large_page = total;
1054 if (dma_pte_superpage(pte)) {
1055 *large_page = total;
1059 parent = phys_to_virt(dma_pte_addr(pte));
1065 /* clear last level pte, a tlb flush should be followed */
1066 static void dma_pte_clear_range(struct dmar_domain *domain,
1067 unsigned long start_pfn,
1068 unsigned long last_pfn)
1070 unsigned int large_page;
1071 struct dma_pte *first_pte, *pte;
1073 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1074 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1075 BUG_ON(start_pfn > last_pfn);
1077 /* we don't need lock here; nobody else touches the iova range */
1080 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1082 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1087 start_pfn += lvl_to_nr_pages(large_page);
1089 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1091 domain_flush_cache(domain, first_pte,
1092 (void *)pte - (void *)first_pte);
1094 } while (start_pfn && start_pfn <= last_pfn);
1097 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1098 int retain_level, struct dma_pte *pte,
1099 unsigned long pfn, unsigned long start_pfn,
1100 unsigned long last_pfn)
1102 pfn = max(start_pfn, pfn);
1103 pte = &pte[pfn_level_offset(pfn, level)];
1106 unsigned long level_pfn;
1107 struct dma_pte *level_pte;
1109 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1112 level_pfn = pfn & level_mask(level);
1113 level_pte = phys_to_virt(dma_pte_addr(pte));
1116 dma_pte_free_level(domain, level - 1, retain_level,
1117 level_pte, level_pfn, start_pfn,
1122 * Free the page table if we're below the level we want to
1123 * retain and the range covers the entire table.
1125 if (level < retain_level && !(start_pfn > level_pfn ||
1126 last_pfn < level_pfn + level_size(level) - 1)) {
1128 domain_flush_cache(domain, pte, sizeof(*pte));
1129 free_pgtable_page(level_pte);
1132 pfn += level_size(level);
1133 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1137 * clear last level (leaf) ptes and free page table pages below the
1138 * level we wish to keep intact.
1140 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1141 unsigned long start_pfn,
1142 unsigned long last_pfn,
1145 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1146 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1147 BUG_ON(start_pfn > last_pfn);
1149 dma_pte_clear_range(domain, start_pfn, last_pfn);
1151 /* We don't need lock here; nobody else touches the iova range */
1152 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1153 domain->pgd, 0, start_pfn, last_pfn);
1156 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1157 free_pgtable_page(domain->pgd);
1162 /* When a page at a given level is being unlinked from its parent, we don't
1163 need to *modify* it at all. All we need to do is make a list of all the
1164 pages which can be freed just as soon as we've flushed the IOTLB and we
1165 know the hardware page-walk will no longer touch them.
The 'pte' argument is the *parent* PTE, pointing to the page that is to
be freed. */
1168 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1169 int level, struct dma_pte *pte,
1170 struct page *freelist)
1174 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1175 pg->freelist = freelist;
1181 pte = page_address(pg);
1183 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1184 freelist = dma_pte_list_pagetables(domain, level - 1,
1187 } while (!first_pte_in_page(pte));
1192 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1193 struct dma_pte *pte, unsigned long pfn,
1194 unsigned long start_pfn,
1195 unsigned long last_pfn,
1196 struct page *freelist)
1198 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1200 pfn = max(start_pfn, pfn);
1201 pte = &pte[pfn_level_offset(pfn, level)];
1204 unsigned long level_pfn;
1206 if (!dma_pte_present(pte))
1209 level_pfn = pfn & level_mask(level);
1211 /* If range covers entire pagetable, free it */
1212 if (start_pfn <= level_pfn &&
1213 last_pfn >= level_pfn + level_size(level) - 1) {
/* These subordinate page tables are going away entirely. Don't
   bother to clear them; we're just going to *free* them. */
1216 if (level > 1 && !dma_pte_superpage(pte))
1217 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1223 } else if (level > 1) {
1224 /* Recurse down into a level that isn't *entirely* obsolete */
1225 freelist = dma_pte_clear_level(domain, level - 1,
1226 phys_to_virt(dma_pte_addr(pte)),
1227 level_pfn, start_pfn, last_pfn,
1231 pfn += level_size(level);
1232 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1235 domain_flush_cache(domain, first_pte,
1236 (void *)++last_pte - (void *)first_pte);
1241 /* We can't just free the pages because the IOMMU may still be walking
1242 the page tables, and may have cached the intermediate levels. The
1243 pages can only be freed after the IOTLB flush has been done. */
1244 static struct page *domain_unmap(struct dmar_domain *domain,
1245 unsigned long start_pfn,
1246 unsigned long last_pfn)
1248 struct page *freelist;
1250 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1251 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1252 BUG_ON(start_pfn > last_pfn);
1254 /* we don't need lock here; nobody else touches the iova range */
1255 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1256 domain->pgd, 0, start_pfn, last_pfn, NULL);
1259 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1260 struct page *pgd_page = virt_to_page(domain->pgd);
1261 pgd_page->freelist = freelist;
1262 freelist = pgd_page;
1270 static void dma_free_pagelist(struct page *freelist)
1274 while ((pg = freelist)) {
1275 freelist = pg->freelist;
1276 free_pgtable_page(page_address(pg));
1280 static void iova_entry_free(unsigned long data)
1282 struct page *freelist = (struct page *)data;
1284 dma_free_pagelist(freelist);
1287 /* iommu handling */
1288 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1290 struct root_entry *root;
1291 unsigned long flags;
1293 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1295 pr_err("Allocating root entry for %s failed\n",
1300 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1302 spin_lock_irqsave(&iommu->lock, flags);
1303 iommu->root_entry = root;
1304 spin_unlock_irqrestore(&iommu->lock, flags);
1309 static void iommu_set_root_entry(struct intel_iommu *iommu)
1315 addr = virt_to_phys(iommu->root_entry);
1316 if (sm_supported(iommu))
1317 addr |= DMA_RTADDR_SMT;
1319 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1320 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1322 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1324 /* Make sure hardware complete it */
1325 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1326 readl, (sts & DMA_GSTS_RTPS), sts);
1328 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1331 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1336 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1339 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1340 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1342 /* Make sure hardware complete it */
1343 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1344 readl, (!(val & DMA_GSTS_WBFS)), val);
1346 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* return value determines if we need a write buffer flush */
1350 static void __iommu_flush_context(struct intel_iommu *iommu,
1351 u16 did, u16 source_id, u8 function_mask,
1358 case DMA_CCMD_GLOBAL_INVL:
1359 val = DMA_CCMD_GLOBAL_INVL;
1361 case DMA_CCMD_DOMAIN_INVL:
1362 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1364 case DMA_CCMD_DEVICE_INVL:
1365 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1366 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1371 val |= DMA_CCMD_ICC;
1373 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1374 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1376 /* Make sure hardware complete it */
1377 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1378 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1380 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* return value determines if we need a write buffer flush */
1384 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1385 u64 addr, unsigned int size_order, u64 type)
1387 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1388 u64 val = 0, val_iva = 0;
1392 case DMA_TLB_GLOBAL_FLUSH:
1393 /* global flush doesn't need set IVA_REG */
1394 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1396 case DMA_TLB_DSI_FLUSH:
1397 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1399 case DMA_TLB_PSI_FLUSH:
1400 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1401 /* IH bit is passed in as part of address */
1402 val_iva = size_order | addr;
1407 /* Note: set drain read/write */
* This is probably meant to be super secure. Looks like we can
* ignore it without any impact.
1413 if (cap_read_drain(iommu->cap))
1414 val |= DMA_TLB_READ_DRAIN;
1416 if (cap_write_drain(iommu->cap))
1417 val |= DMA_TLB_WRITE_DRAIN;
1419 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1420 /* Note: Only uses first TLB reg currently */
1422 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1423 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1425 /* Make sure hardware complete it */
1426 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1427 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1429 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1431 /* check IOTLB invalidation granularity */
1432 if (DMA_TLB_IAIG(val) == 0)
1433 pr_err("Flush IOTLB failed\n");
1434 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1435 pr_debug("TLB flush request %Lx, actual %Lx\n",
1436 (unsigned long long)DMA_TLB_IIRG(type),
1437 (unsigned long long)DMA_TLB_IAIG(val));
1440 static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1444 struct device_domain_info *info;
1446 assert_spin_locked(&device_domain_lock);
1451 list_for_each_entry(info, &domain->devices, link)
1452 if (info->iommu == iommu && info->bus == bus &&
1453 info->devfn == devfn) {
1454 if (info->ats_supported && info->dev)
1462 static void domain_update_iotlb(struct dmar_domain *domain)
1464 struct device_domain_info *info;
1465 bool has_iotlb_device = false;
1467 assert_spin_locked(&device_domain_lock);
1469 list_for_each_entry(info, &domain->devices, link) {
1470 struct pci_dev *pdev;
1472 if (!info->dev || !dev_is_pci(info->dev))
1475 pdev = to_pci_dev(info->dev);
1476 if (pdev->ats_enabled) {
1477 has_iotlb_device = true;
1482 domain->has_iotlb_device = has_iotlb_device;
1485 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1487 struct pci_dev *pdev;
1489 assert_spin_locked(&device_domain_lock);
1491 if (!info || !dev_is_pci(info->dev))
1494 pdev = to_pci_dev(info->dev);
1495 /* For IOMMU that supports device IOTLB throttling (DIT), we assign
1496 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
1497 * queue depth at PF level. If DIT is not set, PFSID will be treated as
1498 * reserved, which should be set to 0.
1500 if (!ecap_dit(info->iommu->ecap))
1503 struct pci_dev *pf_pdev;
1505 /* pdev will be returned if device is not a vf */
1506 pf_pdev = pci_physfn(pdev);
1507 info->pfsid = pci_dev_id(pf_pdev);
1510 #ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of
   the device if you enable PASID support after ATS support is
   undefined. So always enable PASID support on devices which
   have it, even if we can't yet know if we're ever going to
   use it. */
1516 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1517 info->pasid_enabled = 1;
1519 if (info->pri_supported &&
1520 (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
1521 !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1522 info->pri_enabled = 1;
1524 if (info->ats_supported && pci_ats_page_aligned(pdev) &&
1525 !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1526 info->ats_enabled = 1;
1527 domain_update_iotlb(info->domain);
1528 info->ats_qdep = pci_ats_queue_depth(pdev);
1532 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1534 struct pci_dev *pdev;
1536 assert_spin_locked(&device_domain_lock);
1538 if (!dev_is_pci(info->dev))
1541 pdev = to_pci_dev(info->dev);
1543 if (info->ats_enabled) {
1544 pci_disable_ats(pdev);
1545 info->ats_enabled = 0;
1546 domain_update_iotlb(info->domain);
1548 #ifdef CONFIG_INTEL_IOMMU_SVM
1549 if (info->pri_enabled) {
1550 pci_disable_pri(pdev);
1551 info->pri_enabled = 0;
1553 if (info->pasid_enabled) {
1554 pci_disable_pasid(pdev);
1555 info->pasid_enabled = 0;
1560 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1561 u64 addr, unsigned mask)
1564 unsigned long flags;
1565 struct device_domain_info *info;
1567 if (!domain->has_iotlb_device)
1570 spin_lock_irqsave(&device_domain_lock, flags);
1571 list_for_each_entry(info, &domain->devices, link) {
1572 if (!info->ats_enabled)
1575 sid = info->bus << 8 | info->devfn;
1576 qdep = info->ats_qdep;
1577 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1580 spin_unlock_irqrestore(&device_domain_lock, flags);
1583 static void domain_flush_piotlb(struct intel_iommu *iommu,
1584 struct dmar_domain *domain,
1585 u64 addr, unsigned long npages, bool ih)
1587 u16 did = domain->iommu_did[iommu->seq_id];
1589 if (domain->default_pasid)
1590 qi_flush_piotlb(iommu, did, domain->default_pasid,
1593 if (!list_empty(&domain->devices))
1594 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
1597 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1598 struct dmar_domain *domain,
1599 unsigned long pfn, unsigned int pages,
1602 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1603 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1604 u16 did = domain->iommu_did[iommu->seq_id];
1611 if (domain_use_first_level(domain)) {
1612 domain_flush_piotlb(iommu, domain, addr, pages, ih);
/*
 * Fall back to domain-selective flush if there is no PSI support or
 * the size is too big. PSI requires the page size to be 2 ^ x,
 * and the base address to be naturally aligned to the size.
 */
1619 if (!cap_pgsel_inv(iommu->cap) ||
1620 mask > cap_max_amask_val(iommu->cap))
1621 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1624 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
/*
 * In caching mode, changes of pages from non-present to present require
 * a flush. However, the device IOTLB doesn't need to be flushed in this
 * case.
 */
1632 if (!cap_caching_mode(iommu->cap) || !map)
1633 iommu_flush_dev_iotlb(domain, addr, mask);
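/*
 * Illustrative worked example (not from the original source): a request
 * to flush 9 pages gives mask == ilog2(__roundup_pow_of_two(9)) == 4, so
 * the page-selective invalidation actually covers 2^4 == 16 aligned pages
 * rather than 9. If 4 were larger than cap_max_amask_val(), the code
 * above would fall back to a domain-selective flush instead.
 */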
1636 /* Notification for newly created mappings */
1637 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1638 struct dmar_domain *domain,
1639 unsigned long pfn, unsigned int pages)
/*
 * It's a non-present to present mapping. Only flush if caching mode
 * and second level.
 */
1645 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
1646 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1648 iommu_flush_write_buffer(iommu);
1651 static void iommu_flush_iova(struct iova_domain *iovad)
1653 struct dmar_domain *domain;
1656 domain = container_of(iovad, struct dmar_domain, iovad);
1658 for_each_domain_iommu(idx, domain) {
1659 struct intel_iommu *iommu = g_iommus[idx];
1660 u16 did = domain->iommu_did[iommu->seq_id];
1662 if (domain_use_first_level(domain))
1663 domain_flush_piotlb(iommu, domain, 0, -1, 0);
1665 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1668 if (!cap_caching_mode(iommu->cap))
1669 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1670 0, MAX_AGAW_PFN_WIDTH);
1674 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1677 unsigned long flags;
1679 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1682 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1683 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1684 pmen &= ~DMA_PMEN_EPM;
1685 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1687 /* wait for the protected region status bit to clear */
1688 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1689 readl, !(pmen & DMA_PMEN_PRS), pmen);
1691 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1694 static void iommu_enable_translation(struct intel_iommu *iommu)
1697 unsigned long flags;
1699 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1700 iommu->gcmd |= DMA_GCMD_TE;
1701 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1703 /* Make sure hardware complete it */
1704 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1705 readl, (sts & DMA_GSTS_TES), sts);
1707 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1710 static void iommu_disable_translation(struct intel_iommu *iommu)
1715 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
1716 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
1719 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1720 iommu->gcmd &= ~DMA_GCMD_TE;
1721 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1723 /* Make sure hardware complete it */
1724 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1725 readl, (!(sts & DMA_GSTS_TES)), sts);
1727 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1730 static int iommu_init_domains(struct intel_iommu *iommu)
1732 u32 ndomains, nlongs;
1735 ndomains = cap_ndoms(iommu->cap);
1736 pr_debug("%s: Number of Domains supported <%d>\n",
1737 iommu->name, ndomains);
1738 nlongs = BITS_TO_LONGS(ndomains);
1740 spin_lock_init(&iommu->lock);
1742 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1743 if (!iommu->domain_ids) {
1744 pr_err("%s: Allocating domain id array failed\n",
1749 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1750 iommu->domains = kzalloc(size, GFP_KERNEL);
1752 if (iommu->domains) {
1753 size = 256 * sizeof(struct dmar_domain *);
1754 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1757 if (!iommu->domains || !iommu->domains[0]) {
1758 pr_err("%s: Allocating domain array failed\n",
1760 kfree(iommu->domain_ids);
1761 kfree(iommu->domains);
1762 iommu->domain_ids = NULL;
1763 iommu->domains = NULL;
1768 * If Caching mode is set, then invalid translations are tagged
1769 * with domain-id 0, hence we need to pre-allocate it. We also
1770 * use domain-id 0 as a marker for non-allocated domain-id, so
1771 * make sure it is not used for a real domain.
1773 set_bit(0, iommu->domain_ids);
/*
 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
 * entry for first-level or pass-through translation modes should
 * be programmed with a domain id different from those used for
 * second-level or nested translation. We reserve a domain id for
 * this purpose.
 */
1782 if (sm_supported(iommu))
1783 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
1788 static void disable_dmar_iommu(struct intel_iommu *iommu)
1790 struct device_domain_info *info, *tmp;
1791 unsigned long flags;
1793 if (!iommu->domains || !iommu->domain_ids)
1796 spin_lock_irqsave(&device_domain_lock, flags);
1797 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1798 if (info->iommu != iommu)
1801 if (!info->dev || !info->domain)
1804 __dmar_remove_one_dev_info(info);
1806 spin_unlock_irqrestore(&device_domain_lock, flags);
1808 if (iommu->gcmd & DMA_GCMD_TE)
1809 iommu_disable_translation(iommu);
1812 static void free_dmar_iommu(struct intel_iommu *iommu)
1814 if ((iommu->domains) && (iommu->domain_ids)) {
1815 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1818 for (i = 0; i < elems; i++)
1819 kfree(iommu->domains[i]);
1820 kfree(iommu->domains);
1821 kfree(iommu->domain_ids);
1822 iommu->domains = NULL;
1823 iommu->domain_ids = NULL;
1826 g_iommus[iommu->seq_id] = NULL;
1828 /* free context mapping */
1829 free_context_table(iommu);
1831 #ifdef CONFIG_INTEL_IOMMU_SVM
1832 if (pasid_supported(iommu)) {
1833 if (ecap_prs(iommu->ecap))
1834 intel_svm_finish_prq(iommu);
1836 if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
1837 ioasid_unregister_allocator(&iommu->pasid_allocator);
/*
 * Check and return whether first level is used by default for
 * DMA translation.
 */
1846 static bool first_level_by_default(void)
1848 struct dmar_drhd_unit *drhd;
1849 struct intel_iommu *iommu;
1850 static int first_level_support = -1;
1852 if (likely(first_level_support != -1))
1853 return first_level_support;
1855 first_level_support = 1;
1858 for_each_active_iommu(iommu, drhd) {
1859 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
1860 first_level_support = 0;
1866 return first_level_support;
1869 static struct dmar_domain *alloc_domain(int flags)
1871 struct dmar_domain *domain;
1873 domain = alloc_domain_mem();
1877 memset(domain, 0, sizeof(*domain));
1878 domain->nid = NUMA_NO_NODE;
1879 domain->flags = flags;
1880 if (first_level_by_default())
1881 domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
1882 domain->has_iotlb_device = false;
1883 INIT_LIST_HEAD(&domain->devices);
1888 /* Must be called with iommu->lock */
1889 static int domain_attach_iommu(struct dmar_domain *domain,
1890 struct intel_iommu *iommu)
1892 unsigned long ndomains;
1895 assert_spin_locked(&device_domain_lock);
1896 assert_spin_locked(&iommu->lock);
1898 domain->iommu_refcnt[iommu->seq_id] += 1;
1899 domain->iommu_count += 1;
1900 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1901 ndomains = cap_ndoms(iommu->cap);
1902 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1904 if (num >= ndomains) {
1905 pr_err("%s: No free domain ids\n", iommu->name);
1906 domain->iommu_refcnt[iommu->seq_id] -= 1;
1907 domain->iommu_count -= 1;
1911 set_bit(num, iommu->domain_ids);
1912 set_iommu_domain(iommu, num, domain);
1914 domain->iommu_did[iommu->seq_id] = num;
1915 domain->nid = iommu->node;
1917 domain_update_iommu_cap(domain);
1923 static int domain_detach_iommu(struct dmar_domain *domain,
1924 struct intel_iommu *iommu)
1928 assert_spin_locked(&device_domain_lock);
1929 assert_spin_locked(&iommu->lock);
1931 domain->iommu_refcnt[iommu->seq_id] -= 1;
1932 count = --domain->iommu_count;
1933 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1934 num = domain->iommu_did[iommu->seq_id];
1935 clear_bit(num, iommu->domain_ids);
1936 set_iommu_domain(iommu, num, NULL);
1938 domain_update_iommu_cap(domain);
1939 domain->iommu_did[iommu->seq_id] = 0;
1945 static struct iova_domain reserved_iova_list;
1946 static struct lock_class_key reserved_rbtree_key;
1948 static int dmar_init_reserved_ranges(void)
1950 struct pci_dev *pdev = NULL;
1954 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
1956 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1957 &reserved_rbtree_key);
1959 /* IOAPIC ranges shouldn't be accessed by DMA */
1960 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1961 IOVA_PFN(IOAPIC_RANGE_END));
1963 pr_err("Reserve IOAPIC range failed\n");
1967 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1968 for_each_pci_dev(pdev) {
1971 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1972 r = &pdev->resource[i];
1973 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1975 iova = reserve_iova(&reserved_iova_list,
1979 pci_err(pdev, "Reserve iova for %pR failed\n", r);
1987 static inline int guestwidth_to_adjustwidth(int gaw)
1990 int r = (gaw - 12) % 9;
2001 static void domain_exit(struct dmar_domain *domain)
2004 /* Remove associated devices and clear attached or cached domains */
2005 domain_remove_dev_info(domain);
2008 if (domain->domain.type == IOMMU_DOMAIN_DMA)
2009 put_iova_domain(&domain->iovad);
2012 struct page *freelist;
2014 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
2015 dma_free_pagelist(freelist);
2018 free_domain_mem(domain);
2022 * Get the PASID directory size for scalable mode context entry.
2023 * Value of X in the PDTS field of a scalable mode context entry
2024 * indicates PASID directory with 2^(X + 7) entries.
2026 static inline unsigned long context_get_sm_pds(struct pasid_table *table)
2030 max_pde = table->max_pasid >> PASID_PDE_SHIFT;
2031 pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
2039 * Set the RID_PASID field of a scalable mode context entry. The
2040 * IOMMU hardware will use the PASID value set in this field for
2041 * DMA translations of DMA requests without PASID.
2044 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
2046 context->hi |= pasid & ((1 << 20) - 1);
/*
 * Set the DTE (Device-TLB Enable) field of a scalable mode context
 * entry.
 */
2053 static inline void context_set_sm_dte(struct context_entry *context)
2055 context->lo |= (1 << 2);
/*
 * Set the PRE (Page Request Enable) field of a scalable mode context
 * entry.
 */
2062 static inline void context_set_sm_pre(struct context_entry *context)
2064 context->lo |= (1 << 4);
2067 /* Convert value to context PASID directory size field coding. */
2068 #define context_pdts(pds) (((pds) & 0x7) << 9)
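/*
 * Illustrative worked example (not from the original source): a PDTS
 * value of 3 advertises a PASID directory with 2^(3 + 7) == 1024 entries,
 * and context_pdts(3) == 0x600 places that value in bits 11:9 of the low
 * qword, where it is OR'ed together with the PASID directory pointer in
 * domain_context_mapping_one() below.
 */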
2070 static int domain_context_mapping_one(struct dmar_domain *domain,
2071 struct intel_iommu *iommu,
2072 struct pasid_table *table,
2075 u16 did = domain->iommu_did[iommu->seq_id];
2076 int translation = CONTEXT_TT_MULTI_LEVEL;
2077 struct device_domain_info *info = NULL;
2078 struct context_entry *context;
2079 unsigned long flags;
2084 if (hw_pass_through && domain_type_is_si(domain))
2085 translation = CONTEXT_TT_PASS_THROUGH;
2087 pr_debug("Set context mapping for %02x:%02x.%d\n",
2088 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2090 BUG_ON(!domain->pgd);
2092 spin_lock_irqsave(&device_domain_lock, flags);
2093 spin_lock(&iommu->lock);
2096 context = iommu_context_addr(iommu, bus, devfn, 1);
2101 if (context_present(context))
/*
 * For kdump cases, old valid entries may be cached due to the
 * in-flight DMA and copied pgtable, but there is no unmapping
 * behaviour for them, thus we need an explicit cache flush for
 * the newly-mapped device. For kdump, at this point, the device
 * is supposed to finish reset at its driver probe stage, so no
 * in-flight DMA will exist, and we don't need to worry anymore
 * hereafter.
 */
2113 if (context_copied(context)) {
2114 u16 did_old = context_domain_id(context);
2116 if (did_old < cap_ndoms(iommu->cap)) {
2117 iommu->flush.flush_context(iommu, did_old,
2118 (((u16)bus) << 8) | devfn,
2119 DMA_CCMD_MASK_NOBIT,
2120 DMA_CCMD_DEVICE_INVL);
2121 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2126 context_clear_entry(context);
2128 if (sm_supported(iommu)) {
2133 /* Setup the PASID DIR pointer: */
2134 pds = context_get_sm_pds(table);
2135 context->lo = (u64)virt_to_phys(table->table) |
2138 /* Setup the RID_PASID field: */
2139 context_set_sm_rid2pasid(context, PASID_RID2PASID);
/*
 * Setup the Device-TLB enable bit and Page request
 * Enable bit:
 */
2145 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2146 if (info && info->ats_supported)
2147 context_set_sm_dte(context);
2148 if (info && info->pri_supported)
2149 context_set_sm_pre(context);
2151 struct dma_pte *pgd = domain->pgd;
2154 context_set_domain_id(context, did);
2156 if (translation != CONTEXT_TT_PASS_THROUGH) {
/*
 * Skip top levels of page tables for an iommu which has
 * less agaw than the default. Unnecessary for PT mode.
 */
2161 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2163 pgd = phys_to_virt(dma_pte_addr(pgd));
2164 if (!dma_pte_present(pgd))
2168 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2169 if (info && info->ats_supported)
2170 translation = CONTEXT_TT_DEV_IOTLB;
2172 translation = CONTEXT_TT_MULTI_LEVEL;
2174 context_set_address_root(context, virt_to_phys(pgd));
2175 context_set_address_width(context, agaw);
2178 * In pass through mode, AW must be programmed to
2179 * indicate the largest AGAW value supported by
2180 * hardware. And ASR is ignored by hardware.
2182 context_set_address_width(context, iommu->msagaw);
2185 context_set_translation_type(context, translation);
2188 context_set_fault_enable(context);
2189 context_set_present(context);
2190 if (!ecap_coherent(iommu->ecap))
2191 clflush_cache_range(context, sizeof(*context));
/*
 * It's a non-present to present mapping. If hardware doesn't cache
 * non-present entries we only need to flush the write-buffer. If it
 * _does_ cache non-present entries, then it does so in the special
 * domain #0, which we have to flush:
 */
2199 if (cap_caching_mode(iommu->cap)) {
2200 iommu->flush.flush_context(iommu, 0,
2201 (((u16)bus) << 8) | devfn,
2202 DMA_CCMD_MASK_NOBIT,
2203 DMA_CCMD_DEVICE_INVL);
2204 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2206 iommu_flush_write_buffer(iommu);
2208 iommu_enable_dev_iotlb(info);
2213 spin_unlock(&iommu->lock);
2214 spin_unlock_irqrestore(&device_domain_lock, flags);
2219 struct domain_context_mapping_data {
2220 struct dmar_domain *domain;
2221 struct intel_iommu *iommu;
2222 struct pasid_table *table;
2225 static int domain_context_mapping_cb(struct pci_dev *pdev,
2226 u16 alias, void *opaque)
2228 struct domain_context_mapping_data *data = opaque;
2230 return domain_context_mapping_one(data->domain, data->iommu,
2231 data->table, PCI_BUS_NUM(alias),
2236 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2238 struct domain_context_mapping_data data;
2239 struct pasid_table *table;
2240 struct intel_iommu *iommu;
2243 iommu = device_to_iommu(dev, &bus, &devfn);
2247 table = intel_pasid_get_table(dev);
2249 if (!dev_is_pci(dev))
2250 return domain_context_mapping_one(domain, iommu, table,
2253 data.domain = domain;
2257 return pci_for_each_dma_alias(to_pci_dev(dev),
2258 &domain_context_mapping_cb, &data);
2261 static int domain_context_mapped_cb(struct pci_dev *pdev,
2262 u16 alias, void *opaque)
2264 struct intel_iommu *iommu = opaque;
2266 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2269 static int domain_context_mapped(struct device *dev)
2271 struct intel_iommu *iommu;
2274 iommu = device_to_iommu(dev, &bus, &devfn);
2278 if (!dev_is_pci(dev))
2279 return device_context_mapped(iommu, bus, devfn);
2281 return !pci_for_each_dma_alias(to_pci_dev(dev),
2282 domain_context_mapped_cb, iommu);
2285 /* Returns a number of VTD pages, but aligned to MM page size */
2286 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2289 host_addr &= ~PAGE_MASK;
2290 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
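/*
 * Illustrative worked example (not from the original source): with 4KiB
 * MM pages, a buffer starting at host_addr 0x1234 with size 0x2000 keeps
 * an in-page offset of 0x234, so PAGE_ALIGN(0x234 + 0x2000) == 0x3000 and
 * the mapping needs 3 VT-d pages even though the length alone spans only 2.
 */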
2293 /* Return largest possible superpage level for a given mapping */
2294 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2295 unsigned long iov_pfn,
2296 unsigned long phy_pfn,
2297 unsigned long pages)
2299 int support, level = 1;
2300 unsigned long pfnmerge;
2302 support = domain->iommu_superpage;
2304 /* To use a large page, the virtual *and* physical addresses
2305 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2306 of them will mean we have to use smaller pages. So just
2307 merge them and check both at once. */
2308 pfnmerge = iov_pfn | phy_pfn;
2310 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2311 pages >>= VTD_STRIDE_SHIFT;
2314 pfnmerge >>= VTD_STRIDE_SHIFT;
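/*
 * Illustrative worked example (not from the original source): with
 * iommu_superpage >= 1 and both iov_pfn and phy_pfn being multiples of
 * 512 (i.e. 2MiB aligned), a request of at least 512 pages passes the
 * alignment check above and is reported as level 2, so __domain_mapping()
 * below sets DMA_PTE_LARGE_PAGE and uses a single 2MiB PTE per 512 pages.
 * Any misalignment in either address forces level 1 (4KiB) mappings.
 */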
2321 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2322 struct scatterlist *sg, unsigned long phys_pfn,
2323 unsigned long nr_pages, int prot)
2325 struct dma_pte *first_pte = NULL, *pte = NULL;
2327 unsigned long sg_res = 0;
2328 unsigned int largepage_lvl = 0;
2329 unsigned long lvl_pages = 0;
2332 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2334 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2337 attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
2338 if (domain_use_first_level(domain))
2339 attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
2343 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
2346 while (nr_pages > 0) {
2350 unsigned int pgoff = sg->offset & ~PAGE_MASK;
2352 sg_res = aligned_nrpages(sg->offset, sg->length);
2353 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
2354 sg->dma_length = sg->length;
2355 pteval = (sg_phys(sg) - pgoff) | attr;
2356 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2360 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2362 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
/* It is a large page */
2366 if (largepage_lvl > 1) {
2367 unsigned long nr_superpages, end_pfn;
2369 pteval |= DMA_PTE_LARGE_PAGE;
2370 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2372 nr_superpages = sg_res / lvl_pages;
2373 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2376 * Ensure that old small page tables are
2377 * removed to make room for superpage(s).
2378 * We're adding new large pages, so make sure
2379 * we don't remove their parent tables.
2381 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2384 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2388 /* We don't need lock here, nobody else
2389 * touches the iova range
2391 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2393 static int dumps = 5;
2394 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2395 iov_pfn, tmp, (unsigned long long)pteval);
2398 debug_dma_dump_mappings(NULL);
2403 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2405 BUG_ON(nr_pages < lvl_pages);
2406 BUG_ON(sg_res < lvl_pages);
2408 nr_pages -= lvl_pages;
2409 iov_pfn += lvl_pages;
2410 phys_pfn += lvl_pages;
2411 pteval += lvl_pages * VTD_PAGE_SIZE;
2412 sg_res -= lvl_pages;
2414 /* If the next PTE would be the first in a new page, then we
2415 need to flush the cache on the entries we've just written.
2416 And then we'll need to recalculate 'pte', so clear it and
2417 let it get set again in the if (!pte) block above.
2419 If we're done (!nr_pages) we need to flush the cache too.
2421 Also if we've been setting superpages, we may need to
2422 recalculate 'pte' and switch back to smaller pages for the
2423 end of the mapping, if the trailing size is not enough to
2424 use another superpage (i.e. sg_res < lvl_pages). */
2426 if (!nr_pages || first_pte_in_page(pte) ||
2427 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2428 domain_flush_cache(domain, first_pte,
2429 (void *)pte - (void *)first_pte);
2433 if (!sg_res && nr_pages)
2439 static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2440 struct scatterlist *sg, unsigned long phys_pfn,
2441 unsigned long nr_pages, int prot)
2444 struct intel_iommu *iommu;
2446 /* Do the real mapping first */
2447 ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
2451 for_each_domain_iommu(iommu_id, domain) {
2452 iommu = g_iommus[iommu_id];
2453 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2459 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2460 struct scatterlist *sg, unsigned long nr_pages,
2463 return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2466 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2467 unsigned long phys_pfn, unsigned long nr_pages,
2470 return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
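/*
 * A typical caller maps a physically contiguous buffer into a freshly
 * allocated IOVA range, e.g.
 *
 *	domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
 *			   mm_to_dma_pfn(paddr >> PAGE_SHIFT),
 *			   aligned_nrpages(paddr, size), prot);
 *
 * which is how __intel_map_single() below uses it.
 */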
2473 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2475 unsigned long flags;
2476 struct context_entry *context;
2482 spin_lock_irqsave(&iommu->lock, flags);
2483 context = iommu_context_addr(iommu, bus, devfn, 0);
2485 spin_unlock_irqrestore(&iommu->lock, flags);
2488 did_old = context_domain_id(context);
2489 context_clear_entry(context);
2490 __iommu_flush_cache(iommu, context, sizeof(*context));
2491 spin_unlock_irqrestore(&iommu->lock, flags);
2492 iommu->flush.flush_context(iommu,
2494 (((u16)bus) << 8) | devfn,
2495 DMA_CCMD_MASK_NOBIT,
2496 DMA_CCMD_DEVICE_INVL);
2497 iommu->flush.flush_iotlb(iommu,
2504 static inline void unlink_domain_info(struct device_domain_info *info)
2506 assert_spin_locked(&device_domain_lock);
2507 list_del(&info->link);
2508 list_del(&info->global);
2510 dev_iommu_priv_set(info->dev, NULL);
2513 static void domain_remove_dev_info(struct dmar_domain *domain)
2515 struct device_domain_info *info, *tmp;
2516 unsigned long flags;
2518 spin_lock_irqsave(&device_domain_lock, flags);
2519 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2520 __dmar_remove_one_dev_info(info);
2521 spin_unlock_irqrestore(&device_domain_lock, flags);
2524 struct dmar_domain *find_domain(struct device *dev)
2526 struct device_domain_info *info;
2528 if (unlikely(attach_deferred(dev)))
2531 /* No lock here, assumes no domain exit in normal case */
2532 info = get_domain_info(dev);
2534 return info->domain;
2539 static void do_deferred_attach(struct device *dev)
2541 struct iommu_domain *domain;
2543 dev_iommu_priv_set(dev, NULL);
2544 domain = iommu_get_domain_for_dev(dev);
2546 intel_iommu_attach_device(domain, dev);
2549 static inline struct device_domain_info *
2550 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2552 struct device_domain_info *info;
2554 list_for_each_entry(info, &device_domain_list, global)
2555 if (info->segment == segment && info->bus == bus &&
2556 info->devfn == devfn)
2562 static int domain_setup_first_level(struct intel_iommu *iommu,
2563 struct dmar_domain *domain,
2567 int flags = PASID_FLAG_SUPERVISOR_MODE;
2568 struct dma_pte *pgd = domain->pgd;
2572 * Skip top levels of page tables for an IOMMU which has
2573 * less agaw than the default. Unnecessary for PT mode.
2575 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2576 pgd = phys_to_virt(dma_pte_addr(pgd));
2577 if (!dma_pte_present(pgd))
2581 level = agaw_to_level(agaw);
2582 if (level != 4 && level != 5)
2585 flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
2587 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
2588 domain->iommu_did[iommu->seq_id],
2592 static bool dev_is_real_dma_subdevice(struct device *dev)
2594 return dev && dev_is_pci(dev) &&
2595 pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
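/*
 * e.g. devices behind an Intel VMD controller issue DMA with the VMD
 * endpoint's requester ID, so pci_real_dma_dev() differs from the
 * device itself.
 */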
2598 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2601 struct dmar_domain *domain)
2603 struct dmar_domain *found = NULL;
2604 struct device_domain_info *info;
2605 unsigned long flags;
2608 info = alloc_devinfo_mem();
2612 if (!dev_is_real_dma_subdevice(dev)) {
2614 info->devfn = devfn;
2615 info->segment = iommu->segment;
2617 struct pci_dev *pdev = to_pci_dev(dev);
2619 info->bus = pdev->bus->number;
2620 info->devfn = pdev->devfn;
2621 info->segment = pci_domain_nr(pdev->bus);
2624 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2625 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2628 info->domain = domain;
2629 info->iommu = iommu;
2630 info->pasid_table = NULL;
2631 info->auxd_enabled = 0;
2632 INIT_LIST_HEAD(&info->auxiliary_domains);
2634 if (dev && dev_is_pci(dev)) {
2635 struct pci_dev *pdev = to_pci_dev(info->dev);
2637 if (ecap_dev_iotlb_support(iommu->ecap) &&
2638 pci_ats_supported(pdev) &&
2639 dmar_find_matched_atsr_unit(pdev))
2640 info->ats_supported = 1;
2642 if (sm_supported(iommu)) {
2643 if (pasid_supported(iommu)) {
2644 int features = pci_pasid_features(pdev);
2646 info->pasid_supported = features | 1;
2649 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2650 pci_pri_supported(pdev))
2651 info->pri_supported = 1;
2655 spin_lock_irqsave(&device_domain_lock, flags);
2657 found = find_domain(dev);
2660 struct device_domain_info *info2;
2661 info2 = dmar_search_domain_by_dev_info(info->segment, info->bus,
2664 found = info2->domain;
2670 spin_unlock_irqrestore(&device_domain_lock, flags);
2671 free_devinfo_mem(info);
2672 /* Caller must free the original domain */
2676 spin_lock(&iommu->lock);
2677 ret = domain_attach_iommu(domain, iommu);
2678 spin_unlock(&iommu->lock);
2681 spin_unlock_irqrestore(&device_domain_lock, flags);
2682 free_devinfo_mem(info);
2686 list_add(&info->link, &domain->devices);
2687 list_add(&info->global, &device_domain_list);
2689 dev_iommu_priv_set(dev, info);
2690 spin_unlock_irqrestore(&device_domain_lock, flags);
2692 /* PASID table is mandatory for a PCI device in scalable mode. */
2693 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2694 ret = intel_pasid_alloc_table(dev);
2696 dev_err(dev, "PASID table allocation failed\n");
2697 dmar_remove_one_dev_info(dev);
2701 /* Set up the PASID entry for requests without PASID: */
2702 spin_lock_irqsave(&iommu->lock, flags);
2703 if (hw_pass_through && domain_type_is_si(domain))
2704 ret = intel_pasid_setup_pass_through(iommu, domain,
2705 dev, PASID_RID2PASID);
2706 else if (domain_use_first_level(domain))
2707 ret = domain_setup_first_level(iommu, domain, dev,
2710 ret = intel_pasid_setup_second_level(iommu, domain,
2711 dev, PASID_RID2PASID);
2712 spin_unlock_irqrestore(&iommu->lock, flags);
2714 dev_err(dev, "Setup RID2PASID failed\n");
2715 dmar_remove_one_dev_info(dev);
2720 if (dev && domain_context_mapping(domain, dev)) {
2721 dev_err(dev, "Domain context map failed\n");
2722 dmar_remove_one_dev_info(dev);
2729 static int iommu_domain_identity_map(struct dmar_domain *domain,
2730 unsigned long first_vpfn,
2731 unsigned long last_vpfn)
2734 * RMRR range might have overlap with physical memory range,
2737 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2739 return __domain_mapping(domain, first_vpfn, NULL,
2740 first_vpfn, last_vpfn - first_vpfn + 1,
2741 DMA_PTE_READ|DMA_PTE_WRITE);
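/*
 * "Identity" here means iov_pfn == phys_pfn, e.g. mapping PFNs
 * 0x100-0x1ff leaves IOVA 0x100000 translating to physical address
 * 0x100000.
 */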
2744 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2746 static int __init si_domain_init(int hw)
2748 struct dmar_rmrr_unit *rmrr;
2752 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2756 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2757 domain_exit(si_domain);
2764 for_each_online_node(nid) {
2765 unsigned long start_pfn, end_pfn;
2768 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2769 ret = iommu_domain_identity_map(si_domain,
2770 mm_to_dma_pfn(start_pfn),
2771 mm_to_dma_pfn(end_pfn));
2778 * Identity map the RMRRs so that devices with RMRRs could also use
2781 for_each_rmrr_units(rmrr) {
2782 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2784 unsigned long long start = rmrr->base_address;
2785 unsigned long long end = rmrr->end_address;
2787 if (WARN_ON(end < start ||
2788 end >> agaw_to_width(si_domain->agaw)))
2791 ret = iommu_domain_identity_map(si_domain,
2792 mm_to_dma_pfn(start >> PAGE_SHIFT),
2793 mm_to_dma_pfn(end >> PAGE_SHIFT));
2802 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2804 struct dmar_domain *ndomain;
2805 struct intel_iommu *iommu;
2808 iommu = device_to_iommu(dev, &bus, &devfn);
2812 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2813 if (ndomain != domain)
2819 static bool device_has_rmrr(struct device *dev)
2821 struct dmar_rmrr_unit *rmrr;
2826 for_each_rmrr_units(rmrr) {
2828 * Return TRUE if this RMRR contains the device that
2831 for_each_active_dev_scope(rmrr->devices,
2832 rmrr->devices_cnt, i, tmp)
2834 is_downstream_to_pci_bridge(dev, tmp)) {
2844 * device_rmrr_is_relaxable - Test whether the RMRR of this device
2845 * is relaxable (ie. is allowed to be not enforced under some conditions)
2846 * @dev: device handle
2848 * We assume that PCI USB devices with RMRRs have them largely
2849 * for historical reasons and that the RMRR space is not actively used post
2850 * boot. This exclusion may change if vendors begin to abuse it.
2852 * The same exception is made for graphics devices, with the requirement that
2853 * any use of the RMRR regions will be torn down before assigning the device
2856 * Return: true if the RMRR is relaxable, false otherwise
2858 static bool device_rmrr_is_relaxable(struct device *dev)
2860 struct pci_dev *pdev;
2862 if (!dev_is_pci(dev))
2865 pdev = to_pci_dev(dev);
2866 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2873 * There are a couple cases where we need to restrict the functionality of
2874 * devices associated with RMRRs. The first is when evaluating a device for
2875 * identity mapping because problems exist when devices are moved in and out
2876 * of domains and their respective RMRR information is lost. This means that
2877 * a device with associated RMRRs will never be in a "passthrough" domain.
2878 * The second is use of the device through the IOMMU API. This interface
2879 * expects to have full control of the IOVA space for the device. We cannot
2880 * satisfy both the requirement that RMRR access is maintained and have an
2881 * unencumbered IOVA space. We also have no ability to quiesce the device's
2882 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2883 * We therefore prevent devices associated with an RMRR from participating in
2884 * the IOMMU API, which eliminates them from device assignment.
2886 * In both cases, devices which have relaxable RMRRs are not concerned by this
2887 * restriction. See device_rmrr_is_relaxable comment.
2889 static bool device_is_rmrr_locked(struct device *dev)
2891 if (!device_has_rmrr(dev))
2894 if (device_rmrr_is_relaxable(dev))
2901 * Return the required default domain type for a specific device.
2903 * @dev: the device in query
2907 * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
2908 * - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
2909 * - 0: both identity and dynamic domains work for this device
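/*
 * For example, check_tylersburg_isoch() sets IDENTMAP_AZALIA on affected
 * chipsets, which makes the integrated HD Audio device (IS_AZALIA) fall
 * into the IOMMU_DOMAIN_IDENTITY case below.
 */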
2911 static int device_def_domain_type(struct device *dev)
2913 if (dev_is_pci(dev)) {
2914 struct pci_dev *pdev = to_pci_dev(dev);
2917 * Prevent any device marked as untrusted from getting
2918 * placed into the static identity mapping domain.
2920 if (pdev->untrusted)
2921 return IOMMU_DOMAIN_DMA;
2923 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2924 return IOMMU_DOMAIN_IDENTITY;
2926 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2927 return IOMMU_DOMAIN_IDENTITY;
2933 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2936 * Start from a sane IOMMU hardware state.
2937 * If queued invalidation was already initialized by us
2938 * (for example, while enabling interrupt remapping), then
2939 * things are already rolling from a sane state.
2943 * Clear any previous faults.
2945 dmar_fault(-1, iommu);
2947 * Disable queued invalidation if supported and already enabled
2948 * before OS handover.
2950 dmar_disable_qi(iommu);
2953 if (dmar_enable_qi(iommu)) {
2955 * Queued invalidation is not enabled; use register-based invalidation
2957 iommu->flush.flush_context = __iommu_flush_context;
2958 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2959 pr_info("%s: Using Register based invalidation\n",
2962 iommu->flush.flush_context = qi_flush_context;
2963 iommu->flush.flush_iotlb = qi_flush_iotlb;
2964 pr_info("%s: Using Queued invalidation\n", iommu->name);
2968 static int copy_context_table(struct intel_iommu *iommu,
2969 struct root_entry *old_re,
2970 struct context_entry **tbl,
2973 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2974 struct context_entry *new_ce = NULL, ce;
2975 struct context_entry *old_ce = NULL;
2976 struct root_entry re;
2977 phys_addr_t old_ce_phys;
2979 tbl_idx = ext ? bus * 2 : bus;
2980 memcpy(&re, old_re, sizeof(re));
2982 for (devfn = 0; devfn < 256; devfn++) {
2983 /* First calculate the correct index */
2984 idx = (ext ? devfn * 2 : devfn) % 256;
2987 /* First save what we may have and clean up */
2989 tbl[tbl_idx] = new_ce;
2990 __iommu_flush_cache(iommu, new_ce,
3000 old_ce_phys = root_entry_lctp(&re);
3002 old_ce_phys = root_entry_uctp(&re);
3005 if (ext && devfn == 0) {
3006 /* No LCTP, try UCTP */
3015 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3020 new_ce = alloc_pgtable_page(iommu->node);
3027 /* Now copy the context entry */
3028 memcpy(&ce, old_ce + idx, sizeof(ce));
3030 if (!__context_present(&ce))
3033 did = context_domain_id(&ce);
3034 if (did >= 0 && did < cap_ndoms(iommu->cap))
3035 set_bit(did, iommu->domain_ids);
3038 * We need a marker for copied context entries. This
3039 * marker needs to work for the old format as well as
3040 * for extended context entries.
3042 * Bit 67 of the context entry is used. In the old
3043 * format this bit is available to software, in the
3044 * extended format it is the PGE bit, but PGE is ignored
3045 * by HW if PASIDs are disabled (and thus still
3048 * So disable PASIDs first and then mark the entry
3049 * copied. This means that we don't copy PASID
3050 * translations from the old kernel, but this is fine as
3051 * faults there are not fatal.
3053 context_clear_pasid_enable(&ce);
3054 context_set_copied(&ce);
3059 tbl[tbl_idx + pos] = new_ce;
3061 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3070 static int copy_translation_tables(struct intel_iommu *iommu)
3072 struct context_entry **ctxt_tbls;
3073 struct root_entry *old_rt;
3074 phys_addr_t old_rt_phys;
3075 int ctxt_table_entries;
3076 unsigned long flags;
3081 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3082 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3083 new_ext = !!ecap_ecs(iommu->ecap);
3086 * The RTT bit can only be changed when translation is disabled,
3087 * but disabling translation means to open a window for data
3088 * corruption. So bail out and don't copy anything if we would
3089 * have to change the bit.
3094 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3098 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3102 /* This is too big for the stack - allocate it from slab */
3103 ctxt_table_entries = ext ? 512 : 256;
3105 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3109 for (bus = 0; bus < 256; bus++) {
3110 ret = copy_context_table(iommu, &old_rt[bus],
3111 ctxt_tbls, bus, ext);
3113 pr_err("%s: Failed to copy context table for bus %d\n",
3119 spin_lock_irqsave(&iommu->lock, flags);
3121 /* Context tables are copied, now write them to the root_entry table */
3122 for (bus = 0; bus < 256; bus++) {
3123 int idx = ext ? bus * 2 : bus;
3126 if (ctxt_tbls[idx]) {
3127 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3128 iommu->root_entry[bus].lo = val;
3131 if (!ext || !ctxt_tbls[idx + 1])
3134 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3135 iommu->root_entry[bus].hi = val;
3138 spin_unlock_irqrestore(&iommu->lock, flags);
3142 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3152 #ifdef CONFIG_INTEL_IOMMU_SVM
3153 static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
3155 struct intel_iommu *iommu = data;
3159 return INVALID_IOASID;
3161 * The VT-d virtual command interface always uses the full 20-bit
3162 * PASID range. The host can partition the guest PASID range based on
3163 * policy, but this is out of the guest's control.
3165 if (min < PASID_MIN || max > intel_pasid_max_id)
3166 return INVALID_IOASID;
3168 if (vcmd_alloc_pasid(iommu, &ioasid))
3169 return INVALID_IOASID;
3174 static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
3176 struct intel_iommu *iommu = data;
3181 * The sanity check of the ioasid owner is done at the upper layer, e.g. VFIO.
3182 * We can only free the PASID when all the devices are unbound.
3184 if (ioasid_find(NULL, ioasid, NULL)) {
3185 pr_alert("Cannot free active IOASID %d\n", ioasid);
3188 vcmd_free_pasid(iommu, ioasid);
3191 static void register_pasid_allocator(struct intel_iommu *iommu)
3194 * If we are running in the host, there is no need for a custom allocator
3195 * since PASIDs are allocated from the host system-wide.
3197 if (!cap_caching_mode(iommu->cap))
3200 if (!sm_supported(iommu)) {
3201 pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
3206 * Register a custom PASID allocator if we are running in a guest;
3207 * guest PASIDs must be obtained via the virtual command interface.
3208 * There can be multiple vIOMMUs in each guest but only one allocator
3209 * is active. All vIOMMU allocators will eventually be calling the same
3212 if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
3215 pr_info("Register custom PASID allocator\n");
3216 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
3217 iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
3218 iommu->pasid_allocator.pdata = (void *)iommu;
3219 if (ioasid_register_allocator(&iommu->pasid_allocator)) {
3220 pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
3222 * Disable scalable mode on this IOMMU if there
3223 * is no custom allocator. Mixing SM-capable vIOMMUs
3224 * and non-SM vIOMMUs is not supported.
3231 static int __init init_dmars(void)
3233 struct dmar_drhd_unit *drhd;
3234 struct intel_iommu *iommu;
3240 * initialize and program root entry to not present
3243 for_each_drhd_unit(drhd) {
3245 * No lock is needed as this is only incremented in the single-
3246 * threaded kernel __init code path; all other accesses are read
3249 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3253 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3256 /* Preallocate enough resources for IOMMU hot-addition */
3257 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3258 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3260 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3263 pr_err("Allocating global iommu array failed\n");
3268 for_each_iommu(iommu, drhd) {
3269 if (drhd->ignored) {
3270 iommu_disable_translation(iommu);
3275 * Find the max PASID size of all IOMMUs in the system.
3276 * We need to ensure the system pasid table is no bigger
3277 * than the smallest supported.
3279 if (pasid_supported(iommu)) {
3280 u32 temp = 2 << ecap_pss(iommu->ecap);
3282 intel_pasid_max_id = min_t(u32, temp,
3283 intel_pasid_max_id);
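/*
 * e.g. an ecap PSS field of 19 advertises 2 << 19 = 1048576 PASIDs,
 * i.e. the full 20-bit PASID space.
 */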
3286 g_iommus[iommu->seq_id] = iommu;
3288 intel_iommu_init_qi(iommu);
3290 ret = iommu_init_domains(iommu);
3294 init_translation_status(iommu);
3296 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3297 iommu_disable_translation(iommu);
3298 clear_translation_pre_enabled(iommu);
3299 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3305 * we could share the same root & context tables
3306 * among all IOMMUs. Need to split it later.
3308 ret = iommu_alloc_root_entry(iommu);
3312 if (translation_pre_enabled(iommu)) {
3313 pr_info("Translation already enabled - trying to copy translation structures\n");
3315 ret = copy_translation_tables(iommu);
3318 * We found the IOMMU with translation
3319 * enabled - but failed to copy over the
3320 * old root-entry table. Try to proceed
3321 * by disabling translation now and
3322 * allocating a clean root-entry table.
3323 * This might cause DMAR faults, but
3324 * probably the dump will still succeed.
3326 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3328 iommu_disable_translation(iommu);
3329 clear_translation_pre_enabled(iommu);
3331 pr_info("Copied translation tables from previous kernel for %s\n",
3336 if (!ecap_pass_through(iommu->ecap))
3337 hw_pass_through = 0;
3338 intel_svm_check(iommu);
3342 * Now that qi is enabled on all iommus, set the root entry and flush
3343 * caches. This is required on some Intel X58 chipsets, otherwise the
3344 * flush_context function will loop forever and the boot hangs.
3346 for_each_active_iommu(iommu, drhd) {
3347 iommu_flush_write_buffer(iommu);
3348 #ifdef CONFIG_INTEL_IOMMU_SVM
3349 register_pasid_allocator(iommu);
3351 iommu_set_root_entry(iommu);
3352 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3353 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3356 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3361 iommu_identity_mapping |= IDENTMAP_GFX;
3363 check_tylersburg_isoch();
3365 ret = si_domain_init(hw_pass_through);
3372 * global invalidate context cache
3373 * global invalidate iotlb
3374 * enable translation
3376 for_each_iommu(iommu, drhd) {
3377 if (drhd->ignored) {
3379 * we always have to disable PMRs or DMA may fail on
3383 iommu_disable_protect_mem_regions(iommu);
3387 iommu_flush_write_buffer(iommu);
3389 #ifdef CONFIG_INTEL_IOMMU_SVM
3390 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3392 * Calling dmar_alloc_hwirq() with dmar_global_lock held
3393 * could cause a possible lock race condition.
3395 up_write(&dmar_global_lock);
3396 ret = intel_svm_enable_prq(iommu);
3397 down_write(&dmar_global_lock);
3402 ret = dmar_set_interrupt(iommu);
3410 for_each_active_iommu(iommu, drhd) {
3411 disable_dmar_iommu(iommu);
3412 free_dmar_iommu(iommu);
3421 /* This takes a number of _MM_ pages, not VTD pages */
3422 static unsigned long intel_alloc_iova(struct device *dev,
3423 struct dmar_domain *domain,
3424 unsigned long nrpages, uint64_t dma_mask)
3426 unsigned long iova_pfn;
3429 * Restrict dma_mask to the width that the iommu can handle.
3430 * First-level translation restricts the input-address to a
3431 * canonical address (i.e., address bits 63:N have the same
3432 * value as address bit [N-1], where N is 48-bits with 4-level
3433 * paging and 57-bits with 5-level paging). Hence, skip bit
3436 if (domain_use_first_level(domain))
3437 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1),
3440 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw),
3443 /* Ensure we reserve the whole size-aligned region */
3444 nrpages = __roundup_pow_of_two(nrpages);
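/*
 * e.g. a 5-page request reserves 8 IOVA pages so that the allocated
 * region stays size-aligned.
 */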
3446 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3448 * First try to allocate an io virtual address in
3449 * DMA_BIT_MASK(32) and if that fails then try allocating
3452 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3453 IOVA_PFN(DMA_BIT_MASK(32)), false);
3457 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3458 IOVA_PFN(dma_mask), true);
3459 if (unlikely(!iova_pfn)) {
3460 dev_err_once(dev, "Allocating %ld-page iova failed\n",
3468 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3469 size_t size, int dir, u64 dma_mask)
3471 struct dmar_domain *domain;
3472 phys_addr_t start_paddr;
3473 unsigned long iova_pfn;
3476 struct intel_iommu *iommu;
3477 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3479 BUG_ON(dir == DMA_NONE);
3481 if (unlikely(attach_deferred(dev)))
3482 do_deferred_attach(dev);
3484 domain = find_domain(dev);
3486 return DMA_MAPPING_ERROR;
3488 iommu = domain_get_iommu(domain);
3489 size = aligned_nrpages(paddr, size);
3491 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3496 * Check if DMAR supports zero-length reads on write only
3499 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3500 !cap_zlr(iommu->cap))
3501 prot |= DMA_PTE_READ;
3502 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3503 prot |= DMA_PTE_WRITE;
3505 * paddr - (paddr + size) might be a partial page; we should map the whole
3506 * page. Note: if two parts of one page are separately mapped, we
3507 * might have two guest addresses mapping to the same host paddr, but this
3508 * is not a big problem
3510 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3511 mm_to_dma_pfn(paddr_pfn), size, prot);
3515 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3516 start_paddr += paddr & ~PAGE_MASK;
3518 trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
3524 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3525 dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
3526 size, (unsigned long long)paddr, dir);
3527 return DMA_MAPPING_ERROR;
3530 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3531 unsigned long offset, size_t size,
3532 enum dma_data_direction dir,
3533 unsigned long attrs)
3535 return __intel_map_single(dev, page_to_phys(page) + offset,
3536 size, dir, *dev->dma_mask);
3539 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
3540 size_t size, enum dma_data_direction dir,
3541 unsigned long attrs)
3543 return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
3546 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3548 struct dmar_domain *domain;
3549 unsigned long start_pfn, last_pfn;
3550 unsigned long nrpages;
3551 unsigned long iova_pfn;
3552 struct intel_iommu *iommu;
3553 struct page *freelist;
3554 struct pci_dev *pdev = NULL;
3556 domain = find_domain(dev);
3559 iommu = domain_get_iommu(domain);
3561 iova_pfn = IOVA_PFN(dev_addr);
3563 nrpages = aligned_nrpages(dev_addr, size);
3564 start_pfn = mm_to_dma_pfn(iova_pfn);
3565 last_pfn = start_pfn + nrpages - 1;
3567 if (dev_is_pci(dev))
3568 pdev = to_pci_dev(dev);
3570 freelist = domain_unmap(domain, start_pfn, last_pfn);
3571 if (intel_iommu_strict || (pdev && pdev->untrusted) ||
3572 !has_iova_flush_queue(&domain->iovad)) {
3573 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3574 nrpages, !freelist, 0);
3576 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3577 dma_free_pagelist(freelist);
3579 queue_iova(&domain->iovad, iova_pfn, nrpages,
3580 (unsigned long)freelist);
3582 * queue up the release of the unmap to save the 1/6th of the
3583 * CPU time used up by the IOTLB flush operation...
3587 trace_unmap_single(dev, dev_addr, size);
3590 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3591 size_t size, enum dma_data_direction dir,
3592 unsigned long attrs)
3594 intel_unmap(dev, dev_addr, size);
3597 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
3598 size_t size, enum dma_data_direction dir, unsigned long attrs)
3600 intel_unmap(dev, dev_addr, size);
3603 static void *intel_alloc_coherent(struct device *dev, size_t size,
3604 dma_addr_t *dma_handle, gfp_t flags,
3605 unsigned long attrs)
3607 struct page *page = NULL;
3610 if (unlikely(attach_deferred(dev)))
3611 do_deferred_attach(dev);
3613 size = PAGE_ALIGN(size);
3614 order = get_order(size);
3616 if (gfpflags_allow_blocking(flags)) {
3617 unsigned int count = size >> PAGE_SHIFT;
3619 page = dma_alloc_from_contiguous(dev, count, order,
3620 flags & __GFP_NOWARN);
3624 page = alloc_pages(flags, order);
3627 memset(page_address(page), 0, size);
3629 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3631 dev->coherent_dma_mask);
3632 if (*dma_handle != DMA_MAPPING_ERROR)
3633 return page_address(page);
3634 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3635 __free_pages(page, order);
3640 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3641 dma_addr_t dma_handle, unsigned long attrs)
3644 struct page *page = virt_to_page(vaddr);
3646 size = PAGE_ALIGN(size);
3647 order = get_order(size);
3649 intel_unmap(dev, dma_handle, size);
3650 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3651 __free_pages(page, order);
3654 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3655 int nelems, enum dma_data_direction dir,
3656 unsigned long attrs)
3658 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3659 unsigned long nrpages = 0;
3660 struct scatterlist *sg;
3663 for_each_sg(sglist, sg, nelems, i) {
3664 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3667 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3669 trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3672 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3673 enum dma_data_direction dir, unsigned long attrs)
3676 struct dmar_domain *domain;
3679 unsigned long iova_pfn;
3681 struct scatterlist *sg;
3682 unsigned long start_vpfn;
3683 struct intel_iommu *iommu;
3685 BUG_ON(dir == DMA_NONE);
3687 if (unlikely(attach_deferred(dev)))
3688 do_deferred_attach(dev);
3690 domain = find_domain(dev);
3694 iommu = domain_get_iommu(domain);
3696 for_each_sg(sglist, sg, nelems, i)
3697 size += aligned_nrpages(sg->offset, sg->length);
3699 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3702 sglist->dma_length = 0;
3707 * Check if DMAR supports zero-length reads on write only
3710 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3711 !cap_zlr(iommu->cap))
3712 prot |= DMA_PTE_READ;
3713 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3714 prot |= DMA_PTE_WRITE;
3716 start_vpfn = mm_to_dma_pfn(iova_pfn);
3718 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3719 if (unlikely(ret)) {
3720 dma_pte_free_pagetable(domain, start_vpfn,
3721 start_vpfn + size - 1,
3722 agaw_to_level(domain->agaw) + 1);
3723 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3727 for_each_sg(sglist, sg, nelems, i)
3728 trace_map_sg(dev, i + 1, nelems, sg);
3733 static u64 intel_get_required_mask(struct device *dev)
3735 return DMA_BIT_MASK(32);
3738 static const struct dma_map_ops intel_dma_ops = {
3739 .alloc = intel_alloc_coherent,
3740 .free = intel_free_coherent,
3741 .map_sg = intel_map_sg,
3742 .unmap_sg = intel_unmap_sg,
3743 .map_page = intel_map_page,
3744 .unmap_page = intel_unmap_page,
3745 .map_resource = intel_map_resource,
3746 .unmap_resource = intel_unmap_resource,
3747 .dma_supported = dma_direct_supported,
3748 .mmap = dma_common_mmap,
3749 .get_sgtable = dma_common_get_sgtable,
3750 .get_required_mask = intel_get_required_mask,
3754 bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
3755 enum dma_data_direction dir, enum dma_sync_target target)
3757 struct dmar_domain *domain;
3758 phys_addr_t tlb_addr;
3760 domain = find_domain(dev);
3761 if (WARN_ON(!domain))
3764 tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
3765 if (is_swiotlb_buffer(tlb_addr))
3766 swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
3770 bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
3771 enum dma_data_direction dir, unsigned long attrs,
3774 size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
3775 struct dmar_domain *domain;
3776 struct intel_iommu *iommu;
3777 unsigned long iova_pfn;
3778 unsigned long nrpages;
3779 phys_addr_t tlb_addr;
3783 if (unlikely(attach_deferred(dev)))
3784 do_deferred_attach(dev);
3786 domain = find_domain(dev);
3788 if (WARN_ON(dir == DMA_NONE || !domain))
3789 return DMA_MAPPING_ERROR;
3791 iommu = domain_get_iommu(domain);
3792 if (WARN_ON(!iommu))
3793 return DMA_MAPPING_ERROR;
3795 nrpages = aligned_nrpages(0, size);
3796 iova_pfn = intel_alloc_iova(dev, domain,
3797 dma_to_mm_pfn(nrpages), dma_mask);
3799 return DMA_MAPPING_ERROR;
3802 * Check if DMAR supports zero-length reads on write only
3805 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3806 !cap_zlr(iommu->cap))
3807 prot |= DMA_PTE_READ;
3808 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3809 prot |= DMA_PTE_WRITE;
3812 * If both the physical buffer start address and size are
3813 * page aligned, we don't need to use a bounce page.
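/*
 * e.g. paddr = 0x12345800 with size = 0x800 is not page aligned, so
 * the data is bounced through the swiotlb; a page-aligned 0x1000-byte
 * buffer is mapped in place.
 */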
3815 if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
3816 tlb_addr = swiotlb_tbl_map_single(dev,
3817 __phys_to_dma(dev, io_tlb_start),
3818 paddr, size, aligned_size, dir, attrs);
3819 if (tlb_addr == DMA_MAPPING_ERROR) {
3822 /* Clean up the padding area. */
3823 void *padding_start = phys_to_virt(tlb_addr);
3824 size_t padding_size = aligned_size;
3826 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
3827 (dir == DMA_TO_DEVICE ||
3828 dir == DMA_BIDIRECTIONAL)) {
3829 padding_start += size;
3830 padding_size -= size;
3833 memset(padding_start, 0, padding_size);
3839 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3840 tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
3844 trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
3846 return (phys_addr_t)iova_pfn << PAGE_SHIFT;
3849 if (is_swiotlb_buffer(tlb_addr))
3850 swiotlb_tbl_unmap_single(dev, tlb_addr, size,
3851 aligned_size, dir, attrs);
3853 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3854 dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
3855 size, (unsigned long long)paddr, dir);
3857 return DMA_MAPPING_ERROR;
3861 bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
3862 enum dma_data_direction dir, unsigned long attrs)
3864 size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
3865 struct dmar_domain *domain;
3866 phys_addr_t tlb_addr;
3868 domain = find_domain(dev);
3869 if (WARN_ON(!domain))
3872 tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
3873 if (WARN_ON(!tlb_addr))
3876 intel_unmap(dev, dev_addr, size);
3877 if (is_swiotlb_buffer(tlb_addr))
3878 swiotlb_tbl_unmap_single(dev, tlb_addr, size,
3879 aligned_size, dir, attrs);
3881 trace_bounce_unmap_single(dev, dev_addr, size);
3885 bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
3886 size_t size, enum dma_data_direction dir, unsigned long attrs)
3888 return bounce_map_single(dev, page_to_phys(page) + offset,
3889 size, dir, attrs, *dev->dma_mask);
3893 bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
3894 enum dma_data_direction dir, unsigned long attrs)
3896 return bounce_map_single(dev, phys_addr, size,
3897 dir, attrs, *dev->dma_mask);
3901 bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
3902 enum dma_data_direction dir, unsigned long attrs)
3904 bounce_unmap_single(dev, dev_addr, size, dir, attrs);
3908 bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
3909 enum dma_data_direction dir, unsigned long attrs)
3911 bounce_unmap_single(dev, dev_addr, size, dir, attrs);
3915 bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3916 enum dma_data_direction dir, unsigned long attrs)
3918 struct scatterlist *sg;
3921 for_each_sg(sglist, sg, nelems, i)
3922 bounce_unmap_page(dev, sg->dma_address,
3923 sg_dma_len(sg), dir, attrs);
3927 bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3928 enum dma_data_direction dir, unsigned long attrs)
3931 struct scatterlist *sg;
3933 for_each_sg(sglist, sg, nelems, i) {
3934 sg->dma_address = bounce_map_page(dev, sg_page(sg),
3935 sg->offset, sg->length,
3937 if (sg->dma_address == DMA_MAPPING_ERROR)
3939 sg_dma_len(sg) = sg->length;
3942 for_each_sg(sglist, sg, nelems, i)
3943 trace_bounce_map_sg(dev, i + 1, nelems, sg);
3948 bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
3953 bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
3954 size_t size, enum dma_data_direction dir)
3956 bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
3960 bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
3961 size_t size, enum dma_data_direction dir)
3963 bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
3967 bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
3968 int nelems, enum dma_data_direction dir)
3970 struct scatterlist *sg;
3973 for_each_sg(sglist, sg, nelems, i)
3974 bounce_sync_single(dev, sg_dma_address(sg),
3975 sg_dma_len(sg), dir, SYNC_FOR_CPU);
3979 bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
3980 int nelems, enum dma_data_direction dir)
3982 struct scatterlist *sg;
3985 for_each_sg(sglist, sg, nelems, i)
3986 bounce_sync_single(dev, sg_dma_address(sg),
3987 sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
3990 static const struct dma_map_ops bounce_dma_ops = {
3991 .alloc = intel_alloc_coherent,
3992 .free = intel_free_coherent,
3993 .map_sg = bounce_map_sg,
3994 .unmap_sg = bounce_unmap_sg,
3995 .map_page = bounce_map_page,
3996 .unmap_page = bounce_unmap_page,
3997 .sync_single_for_cpu = bounce_sync_single_for_cpu,
3998 .sync_single_for_device = bounce_sync_single_for_device,
3999 .sync_sg_for_cpu = bounce_sync_sg_for_cpu,
4000 .sync_sg_for_device = bounce_sync_sg_for_device,
4001 .map_resource = bounce_map_resource,
4002 .unmap_resource = bounce_unmap_resource,
4003 .dma_supported = dma_direct_supported,
4006 static inline int iommu_domain_cache_init(void)
4010 iommu_domain_cache = kmem_cache_create("iommu_domain",
4011 sizeof(struct dmar_domain),
4016 if (!iommu_domain_cache) {
4017 pr_err("Couldn't create iommu_domain cache\n");
4024 static inline int iommu_devinfo_cache_init(void)
4028 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
4029 sizeof(struct device_domain_info),
4033 if (!iommu_devinfo_cache) {
4034 pr_err("Couldn't create devinfo cache\n");
4041 static int __init iommu_init_mempool(void)
4044 ret = iova_cache_get();
4048 ret = iommu_domain_cache_init();
4052 ret = iommu_devinfo_cache_init();
4056 kmem_cache_destroy(iommu_domain_cache);
4063 static void __init iommu_exit_mempool(void)
4065 kmem_cache_destroy(iommu_devinfo_cache);
4066 kmem_cache_destroy(iommu_domain_cache);
4070 static void __init init_no_remapping_devices(void)
4072 struct dmar_drhd_unit *drhd;
4076 for_each_drhd_unit(drhd) {
4077 if (!drhd->include_all) {
4078 for_each_active_dev_scope(drhd->devices,
4079 drhd->devices_cnt, i, dev)
4081 /* ignore DMAR unit if no devices exist */
4082 if (i == drhd->devices_cnt)
4087 for_each_active_drhd_unit(drhd) {
4088 if (drhd->include_all)
4091 for_each_active_dev_scope(drhd->devices,
4092 drhd->devices_cnt, i, dev)
4093 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
4095 if (i < drhd->devices_cnt)
4098 /* This IOMMU has *only* gfx devices. Either bypass it or
4099 set the gfx_mapped flag, as appropriate */
4100 drhd->gfx_dedicated = 1;
4106 #ifdef CONFIG_SUSPEND
4107 static int init_iommu_hw(void)
4109 struct dmar_drhd_unit *drhd;
4110 struct intel_iommu *iommu = NULL;
4112 for_each_active_iommu(iommu, drhd)
4114 dmar_reenable_qi(iommu);
4116 for_each_iommu(iommu, drhd) {
4117 if (drhd->ignored) {
4119 * we always have to disable PMRs or DMA may fail on
4123 iommu_disable_protect_mem_regions(iommu);
4127 iommu_flush_write_buffer(iommu);
4129 iommu_set_root_entry(iommu);
4131 iommu->flush.flush_context(iommu, 0, 0, 0,
4132 DMA_CCMD_GLOBAL_INVL);
4133 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4134 iommu_enable_translation(iommu);
4135 iommu_disable_protect_mem_regions(iommu);
4141 static void iommu_flush_all(void)
4143 struct dmar_drhd_unit *drhd;
4144 struct intel_iommu *iommu;
4146 for_each_active_iommu(iommu, drhd) {
4147 iommu->flush.flush_context(iommu, 0, 0, 0,
4148 DMA_CCMD_GLOBAL_INVL);
4149 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4150 DMA_TLB_GLOBAL_FLUSH);
4154 static int iommu_suspend(void)
4156 struct dmar_drhd_unit *drhd;
4157 struct intel_iommu *iommu = NULL;
4160 for_each_active_iommu(iommu, drhd) {
4161 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
4163 if (!iommu->iommu_state)
4169 for_each_active_iommu(iommu, drhd) {
4170 iommu_disable_translation(iommu);
4172 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4174 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4175 readl(iommu->reg + DMAR_FECTL_REG);
4176 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4177 readl(iommu->reg + DMAR_FEDATA_REG);
4178 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4179 readl(iommu->reg + DMAR_FEADDR_REG);
4180 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4181 readl(iommu->reg + DMAR_FEUADDR_REG);
4183 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4188 for_each_active_iommu(iommu, drhd)
4189 kfree(iommu->iommu_state);
4194 static void iommu_resume(void)
4196 struct dmar_drhd_unit *drhd;
4197 struct intel_iommu *iommu = NULL;
4200 if (init_iommu_hw()) {
4202 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4204 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4208 for_each_active_iommu(iommu, drhd) {
4210 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4212 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4213 iommu->reg + DMAR_FECTL_REG);
4214 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4215 iommu->reg + DMAR_FEDATA_REG);
4216 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4217 iommu->reg + DMAR_FEADDR_REG);
4218 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4219 iommu->reg + DMAR_FEUADDR_REG);
4221 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4224 for_each_active_iommu(iommu, drhd)
4225 kfree(iommu->iommu_state);
4228 static struct syscore_ops iommu_syscore_ops = {
4229 .resume = iommu_resume,
4230 .suspend = iommu_suspend,
4233 static void __init init_iommu_pm_ops(void)
4235 register_syscore_ops(&iommu_syscore_ops);
4239 static inline void init_iommu_pm_ops(void) {}
4240 #endif /* CONFIG_PM */
4242 static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
4244 if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
4245 !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
4246 rmrr->end_address <= rmrr->base_address ||
4247 arch_rmrr_sanity_check(rmrr))
4253 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4255 struct acpi_dmar_reserved_memory *rmrr;
4256 struct dmar_rmrr_unit *rmrru;
4258 rmrr = (struct acpi_dmar_reserved_memory *)header;
4259 if (rmrr_sanity_check(rmrr)) {
4261 "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
4262 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4263 rmrr->base_address, rmrr->end_address,
4264 dmi_get_system_info(DMI_BIOS_VENDOR),
4265 dmi_get_system_info(DMI_BIOS_VERSION),
4266 dmi_get_system_info(DMI_PRODUCT_VERSION));
4267 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
4270 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4274 rmrru->hdr = header;
4276 rmrru->base_address = rmrr->base_address;
4277 rmrru->end_address = rmrr->end_address;
4279 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4280 ((void *)rmrr) + rmrr->header.length,
4281 &rmrru->devices_cnt);
4282 if (rmrru->devices_cnt && rmrru->devices == NULL)
4285 list_add(&rmrru->list, &dmar_rmrr_units);
4294 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4296 struct dmar_atsr_unit *atsru;
4297 struct acpi_dmar_atsr *tmp;
4299 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
4301 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4302 if (atsr->segment != tmp->segment)
4304 if (atsr->header.length != tmp->header.length)
4306 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4313 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4315 struct acpi_dmar_atsr *atsr;
4316 struct dmar_atsr_unit *atsru;
4318 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4321 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4322 atsru = dmar_find_atsr(atsr);
4326 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4331 * If memory is allocated from slab by ACPI _DSM method, we need to
4332 * copy the memory content because the memory buffer will be freed
4335 atsru->hdr = (void *)(atsru + 1);
4336 memcpy(atsru->hdr, hdr, hdr->length);
4337 atsru->include_all = atsr->flags & 0x1;
4338 if (!atsru->include_all) {
4339 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4340 (void *)atsr + atsr->header.length,
4341 &atsru->devices_cnt);
4342 if (atsru->devices_cnt && atsru->devices == NULL) {
4348 list_add_rcu(&atsru->list, &dmar_atsr_units);
4353 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4355 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4359 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4361 struct acpi_dmar_atsr *atsr;
4362 struct dmar_atsr_unit *atsru;
4364 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4365 atsru = dmar_find_atsr(atsr);
4367 list_del_rcu(&atsru->list);
4369 intel_iommu_free_atsr(atsru);
4375 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4379 struct acpi_dmar_atsr *atsr;
4380 struct dmar_atsr_unit *atsru;
4382 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4383 atsru = dmar_find_atsr(atsr);
4387 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4388 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4396 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4399 struct intel_iommu *iommu = dmaru->iommu;
4401 if (g_iommus[iommu->seq_id])
4404 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4405 pr_warn("%s: Doesn't support hardware pass through.\n",
4409 if (!ecap_sc_support(iommu->ecap) &&
4410 domain_update_iommu_snooping(iommu)) {
4411 pr_warn("%s: Doesn't support snooping.\n",
4415 sp = domain_update_iommu_superpage(NULL, iommu) - 1;
4416 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4417 pr_warn("%s: Doesn't support large page.\n",
4423 * Disable translation if already enabled prior to OS handover.
4425 if (iommu->gcmd & DMA_GCMD_TE)
4426 iommu_disable_translation(iommu);
4428 g_iommus[iommu->seq_id] = iommu;
4429 ret = iommu_init_domains(iommu);
4431 ret = iommu_alloc_root_entry(iommu);
4435 intel_svm_check(iommu);
4437 if (dmaru->ignored) {
4439 * we always have to disable PMRs or DMA may fail on this device
4442 iommu_disable_protect_mem_regions(iommu);
4446 intel_iommu_init_qi(iommu);
4447 iommu_flush_write_buffer(iommu);
4449 #ifdef CONFIG_INTEL_IOMMU_SVM
4450 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
4451 ret = intel_svm_enable_prq(iommu);
4456 ret = dmar_set_interrupt(iommu);
4460 iommu_set_root_entry(iommu);
4461 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4462 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4463 iommu_enable_translation(iommu);
4465 iommu_disable_protect_mem_regions(iommu);
4469 disable_dmar_iommu(iommu);
4471 free_dmar_iommu(iommu);
4475 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4478 struct intel_iommu *iommu = dmaru->iommu;
4480 if (!intel_iommu_enabled)
4486 ret = intel_iommu_add(dmaru);
4488 disable_dmar_iommu(iommu);
4489 free_dmar_iommu(iommu);
4495 static void intel_iommu_free_dmars(void)
4497 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4498 struct dmar_atsr_unit *atsru, *atsr_n;
4500 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4501 list_del(&rmrru->list);
4502 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4506 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4507 list_del(&atsru->list);
4508 intel_iommu_free_atsr(atsru);
4512 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4515 struct pci_bus *bus;
4516 struct pci_dev *bridge = NULL;
4518 struct acpi_dmar_atsr *atsr;
4519 struct dmar_atsr_unit *atsru;
4521 dev = pci_physfn(dev);
4522 for (bus = dev->bus; bus; bus = bus->parent) {
4524 /* If it's an integrated device, allow ATS */
4527 /* Connected via non-PCIe: no ATS */
4528 if (!pci_is_pcie(bridge) ||
4529 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4531 /* If we found the root port, look it up in the ATSR */
4532 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4537 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4538 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4539 if (atsr->segment != pci_domain_nr(dev->bus))
4542 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4543 if (tmp == &bridge->dev)
4546 if (atsru->include_all)
4556 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4559 struct dmar_rmrr_unit *rmrru;
4560 struct dmar_atsr_unit *atsru;
4561 struct acpi_dmar_atsr *atsr;
4562 struct acpi_dmar_reserved_memory *rmrr;
4564 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4567 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4568 rmrr = container_of(rmrru->hdr,
4569 struct acpi_dmar_reserved_memory, header);
4570 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4571 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4572 ((void *)rmrr) + rmrr->header.length,
4573 rmrr->segment, rmrru->devices,
4574 rmrru->devices_cnt);
4577 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4578 dmar_remove_dev_scope(info, rmrr->segment,
4579 rmrru->devices, rmrru->devices_cnt);
4583 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4584 if (atsru->include_all)
4587 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4588 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4589 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4590 (void *)atsr + atsr->header.length,
4591 atsr->segment, atsru->devices,
4592 atsru->devices_cnt);
4597 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4598 if (dmar_remove_dev_scope(info, atsr->segment,
4599 atsru->devices, atsru->devices_cnt))
4607 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4608 unsigned long val, void *v)
4610 struct memory_notify *mhp = v;
4611 unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4612 unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
4616 case MEM_GOING_ONLINE:
4617 if (iommu_domain_identity_map(si_domain,
4618 start_vpfn, last_vpfn)) {
4619 pr_warn("Failed to build identity map for [%lx-%lx]\n",
4620 start_vpfn, last_vpfn);
4626 case MEM_CANCEL_ONLINE:
4628 struct dmar_drhd_unit *drhd;
4629 struct intel_iommu *iommu;
4630 struct page *freelist;
4632 freelist = domain_unmap(si_domain,
4633 start_vpfn, last_vpfn);
4636 for_each_active_iommu(iommu, drhd)
4637 iommu_flush_iotlb_psi(iommu, si_domain,
4638 start_vpfn, mhp->nr_pages,
4641 dma_free_pagelist(freelist);
4649 static struct notifier_block intel_iommu_memory_nb = {
4650 .notifier_call = intel_iommu_memory_notifier,
4654 static void free_all_cpu_cached_iovas(unsigned int cpu)
4658 for (i = 0; i < g_num_of_iommus; i++) {
4659 struct intel_iommu *iommu = g_iommus[i];
4660 struct dmar_domain *domain;
4666 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4667 domain = get_iommu_domain(iommu, (u16)did);
4669 if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
4672 free_cpu_cached_iovas(cpu, &domain->iovad);
4677 static int intel_iommu_cpu_dead(unsigned int cpu)
4679 free_all_cpu_cached_iovas(cpu);
4683 static void intel_disable_iommus(void)
4685 struct intel_iommu *iommu = NULL;
4686 struct dmar_drhd_unit *drhd;
4688 for_each_iommu(iommu, drhd)
4689 iommu_disable_translation(iommu);
4692 void intel_iommu_shutdown(void)
4694 struct dmar_drhd_unit *drhd;
4695 struct intel_iommu *iommu = NULL;
4697 if (no_iommu || dmar_disabled)
4700 down_write(&dmar_global_lock);
4702 /* Disable PMRs explicitly here. */
4703 for_each_iommu(iommu, drhd)
4704 iommu_disable_protect_mem_regions(iommu);
4706 /* Make sure the IOMMUs are switched off */
4707 intel_disable_iommus();
4709 up_write(&dmar_global_lock);
4712 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4714 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4716 return container_of(iommu_dev, struct intel_iommu, iommu);
4719 static ssize_t intel_iommu_show_version(struct device *dev,
4720 struct device_attribute *attr,
4723 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4724 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4725 return sprintf(buf, "%d:%d\n",
4726 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4728 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4730 static ssize_t intel_iommu_show_address(struct device *dev,
4731 struct device_attribute *attr,
4734 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4735 return sprintf(buf, "%llx\n", iommu->reg_phys);
4737 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4739 static ssize_t intel_iommu_show_cap(struct device *dev,
4740 struct device_attribute *attr,
4743 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4744 return sprintf(buf, "%llx\n", iommu->cap);
4746 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4748 static ssize_t intel_iommu_show_ecap(struct device *dev,
4749 struct device_attribute *attr,
4752 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4753 return sprintf(buf, "%llx\n", iommu->ecap);
4755 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4757 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4758 struct device_attribute *attr,
4761 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4762 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4764 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4766 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4767 struct device_attribute *attr,
4770 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4771 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4772 cap_ndoms(iommu->cap)));
4774 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4776 static struct attribute *intel_iommu_attrs[] = {
4777 &dev_attr_version.attr,
4778 &dev_attr_address.attr,
4780 &dev_attr_ecap.attr,
4781 &dev_attr_domains_supported.attr,
4782 &dev_attr_domains_used.attr,
4786 static struct attribute_group intel_iommu_group = {
4787 .name = "intel-iommu",
4788 .attrs = intel_iommu_attrs,
4791 const struct attribute_group *intel_iommu_groups[] = {
4796 static inline bool has_external_pci(void)
4798 struct pci_dev *pdev = NULL;
4800 for_each_pci_dev(pdev)
4801 if (pdev->external_facing)
4807 static int __init platform_optin_force_iommu(void)
4809 if (!dmar_platform_optin() || no_platform_optin || !has_external_pci())
4812 if (no_iommu || dmar_disabled)
4813 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4816 * If Intel-IOMMU is disabled by default, we will apply identity
4817 * map for all devices except those marked as being untrusted.
4820 iommu_set_default_passthrough(false);
4828 static int __init probe_acpi_namespace_devices(void)
4830 struct dmar_drhd_unit *drhd;
4831 /* To avoid a -Wunused-but-set-variable warning. */
4832 struct intel_iommu *iommu __maybe_unused;
4836 for_each_active_iommu(iommu, drhd) {
4837 for_each_active_dev_scope(drhd->devices,
4838 drhd->devices_cnt, i, dev) {
4839 struct acpi_device_physical_node *pn;
4840 struct iommu_group *group;
4841 struct acpi_device *adev;
4843 if (dev->bus != &acpi_bus_type)
4846 adev = to_acpi_device(dev);
4847 mutex_lock(&adev->physical_node_lock);
4848 list_for_each_entry(pn,
4849 &adev->physical_node_list, node) {
4850 group = iommu_group_get(pn->dev);
4852 iommu_group_put(group);
4856 pn->dev->bus->iommu_ops = &intel_iommu_ops;
4857 ret = iommu_probe_device(pn->dev);
4861 mutex_unlock(&adev->physical_node_lock);
4871 int __init intel_iommu_init(void)
4874 struct dmar_drhd_unit *drhd;
4875 struct intel_iommu *iommu;
4878 * Intel IOMMU is required for a TXT/tboot launch or platform
4879 * opt in, so enforce that.
4881 force_on = tboot_force_iommu() || platform_optin_force_iommu();
4883 if (iommu_init_mempool()) {
4885 panic("tboot: Failed to initialize iommu memory\n");
4889 down_write(&dmar_global_lock);
4890 if (dmar_table_init()) {
4892 panic("tboot: Failed to initialize DMAR table\n");
4896 if (dmar_dev_scope_init() < 0) {
4898 panic("tboot: Failed to initialize DMAR device scope\n");
4902 up_write(&dmar_global_lock);
4905 * The bus notifier takes the dmar_global_lock, so lockdep will
4906 * complain later when we register it under the lock.
4908 dmar_register_bus_notifier();
4910 down_write(&dmar_global_lock);
4913 intel_iommu_debugfs_init();
4915 if (no_iommu || dmar_disabled) {
4917 * We exit the function here to ensure IOMMU's remapping and
4918 * mempool aren't set up, which means that the IOMMU's PMRs
4919 * won't be disabled via the call to init_dmars(). So disable
4920 * it explicitly here. The PMRs were set up by tboot prior to
4921 * calling SENTER, but the kernel is expected to reset/tear down the PMRs.
4924 if (intel_iommu_tboot_noforce) {
4925 for_each_iommu(iommu, drhd)
4926 iommu_disable_protect_mem_regions(iommu);
4930 * Make sure the IOMMUs are switched off, even when we
4931 * boot into a kexec kernel and the previous kernel left them enabled.
4934 intel_disable_iommus();
4938 if (list_empty(&dmar_rmrr_units))
4939 pr_info("No RMRR found\n");
4941 if (list_empty(&dmar_atsr_units))
4942 pr_info("No ATSR found\n");
4944 if (dmar_init_reserved_ranges()) {
4946 panic("tboot: Failed to reserve iommu ranges\n");
4947 goto out_free_reserved_range;
4951 intel_iommu_gfx_mapped = 1;
4953 init_no_remapping_devices();
4958 panic("tboot: Failed to initialize DMARs\n");
4959 pr_err("Initialization failed\n");
4960 goto out_free_reserved_range;
4962 up_write(&dmar_global_lock);
4964 init_iommu_pm_ops();
4966 down_read(&dmar_global_lock);
4967 for_each_active_iommu(iommu, drhd) {
4968 iommu_device_sysfs_add(&iommu->iommu, NULL,
4971 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4972 iommu_device_register(&iommu->iommu);
4974 up_read(&dmar_global_lock);
4976 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4977 if (si_domain && !hw_pass_through)
4978 register_memory_notifier(&intel_iommu_memory_nb);
4979 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4980 intel_iommu_cpu_dead);
4982 down_read(&dmar_global_lock);
4983 if (probe_acpi_namespace_devices())
4984 pr_warn("ACPI name space devices didn't probe correctly\n");
4986 /* Finally, we enable the DMA remapping hardware. */
4987 for_each_iommu(iommu, drhd) {
4988 if (!drhd->ignored && !translation_pre_enabled(iommu))
4989 iommu_enable_translation(iommu);
4991 iommu_disable_protect_mem_regions(iommu);
4993 up_read(&dmar_global_lock);
4995 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4997 intel_iommu_enabled = 1;
5001 out_free_reserved_range:
5002 put_iova_domain(&reserved_iova_list);
5004 intel_iommu_free_dmars();
5005 up_write(&dmar_global_lock);
5006 iommu_exit_mempool();
5010 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
5012 struct intel_iommu *iommu = opaque;
5014 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
5019 * NB - intel-iommu lacks any sort of reference counting for the users of
5020 * dependent devices. If multiple endpoints have intersecting dependent
5021 * devices, unbinding the driver from any one of them will possibly leave
5022 * the others unable to operate.
5024 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
5026 if (!iommu || !dev || !dev_is_pci(dev))
5029 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
5032 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
5034 struct dmar_domain *domain;
5035 struct intel_iommu *iommu;
5036 unsigned long flags;
5038 assert_spin_locked(&device_domain_lock);
5043 iommu = info->iommu;
5044 domain = info->domain;
5047 if (dev_is_pci(info->dev) && sm_supported(iommu))
5048 intel_pasid_tear_down_entry(iommu, info->dev,
5049 PASID_RID2PASID, false);
5051 iommu_disable_dev_iotlb(info);
5052 if (!dev_is_real_dma_subdevice(info->dev))
5053 domain_context_clear(iommu, info->dev);
5054 intel_pasid_free_table(info->dev);
5057 unlink_domain_info(info);
5059 spin_lock_irqsave(&iommu->lock, flags);
5060 domain_detach_iommu(domain, iommu);
5061 spin_unlock_irqrestore(&iommu->lock, flags);
5063 free_devinfo_mem(info);
5066 static void dmar_remove_one_dev_info(struct device *dev)
5068 struct device_domain_info *info;
5069 unsigned long flags;
5071 spin_lock_irqsave(&device_domain_lock, flags);
5072 info = get_domain_info(dev);
5074 __dmar_remove_one_dev_info(info);
5075 spin_unlock_irqrestore(&device_domain_lock, flags);
5078 static int md_domain_init(struct dmar_domain *domain, int guest_width)
5082 /* calculate AGAW */
5083 domain->gaw = guest_width;
5084 adjust_width = guestwidth_to_adjustwidth(guest_width);
5085 domain->agaw = width_to_agaw(adjust_width);
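/*
 * Worked example (illustrative): a guest_width of 48 is already a
 * supported AGAW width, so adjust_width stays 48 and width_to_agaw(48)
 * gives agaw 2, i.e. a 4-level page table; __DOMAIN_MAX_ADDR(48) =
 * 2^48 - 1 then bounds the domain's aperture when the domain is exposed
 * through the IOMMU API below.
 */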
5087 domain->iommu_coherency = 0;
5088 domain->iommu_snooping = 0;
5089 domain->iommu_superpage = 0;
5090 domain->max_addr = 0;
5092 /* always allocate the top pgd */
5093 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
5096 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
5100 static void intel_init_iova_domain(struct dmar_domain *dmar_domain)
5102 init_iova_domain(&dmar_domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
5103 copy_reserved_iova(&reserved_iova_list, &dmar_domain->iovad);
5105 if (!intel_iommu_strict &&
5106 init_iova_flush_queue(&dmar_domain->iovad,
5107 iommu_flush_iova, iova_entry_free))
5108 pr_info("iova flush queue initialization failed\n");
5111 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
5113 struct dmar_domain *dmar_domain;
5114 struct iommu_domain *domain;
5117 case IOMMU_DOMAIN_DMA:
5118 case IOMMU_DOMAIN_UNMANAGED:
5119 dmar_domain = alloc_domain(0);
5121 pr_err("Can't allocate dmar_domain\n");
5124 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
5125 pr_err("Domain initialization failed\n");
5126 domain_exit(dmar_domain);
5130 if (type == IOMMU_DOMAIN_DMA)
5131 intel_init_iova_domain(dmar_domain);
5133 domain = &dmar_domain->domain;
5134 domain->geometry.aperture_start = 0;
5135 domain->geometry.aperture_end =
5136 __DOMAIN_MAX_ADDR(dmar_domain->gaw);
5137 domain->geometry.force_aperture = true;
5140 case IOMMU_DOMAIN_IDENTITY:
5141 return &si_domain->domain;
5149 static void intel_iommu_domain_free(struct iommu_domain *domain)
5151 if (domain != &si_domain->domain)
5152 domain_exit(to_dmar_domain(domain));
5156 * Check whether a @domain could be attached to the @dev through the
5157 * aux-domain attach/detach APIs.
5160 is_aux_domain(struct device *dev, struct iommu_domain *domain)
5162 struct device_domain_info *info = get_domain_info(dev);
5164 return info && info->auxd_enabled &&
5165 domain->type == IOMMU_DOMAIN_UNMANAGED;
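/*
 * Rough usage sketch for the aux-domain path (error handling omitted,
 * caller-side variable names are illustrative): a driver such as an
 * mdev parent would do
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
 *	domain = iommu_domain_alloc(dev->bus);
 *	ret = iommu_aux_attach_device(domain, dev);
 *	pasid = iommu_aux_get_pasid(domain, dev);
 *
 * and then tag the device's DMA with that PASID. This path is only taken
 * when is_aux_domain() above returns true, i.e. the AUX feature was
 * enabled first and the domain is UNMANAGED.
 */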
5168 static void auxiliary_link_device(struct dmar_domain *domain,
5171 struct device_domain_info *info = get_domain_info(dev);
5173 assert_spin_locked(&device_domain_lock);
5177 domain->auxd_refcnt++;
5178 list_add(&domain->auxd, &info->auxiliary_domains);
5181 static void auxiliary_unlink_device(struct dmar_domain *domain,
5184 struct device_domain_info *info = get_domain_info(dev);
5186 assert_spin_locked(&device_domain_lock);
5190 list_del(&domain->auxd);
5191 domain->auxd_refcnt--;
5193 if (!domain->auxd_refcnt && domain->default_pasid > 0)
5194 ioasid_free(domain->default_pasid);
5197 static int aux_domain_add_dev(struct dmar_domain *domain,
5201 unsigned long flags;
5202 struct intel_iommu *iommu;
5204 iommu = device_to_iommu(dev, NULL, NULL);
5208 if (domain->default_pasid <= 0) {
5211 /* No private data needed for the default pasid */
5212 pasid = ioasid_alloc(NULL, PASID_MIN,
5213 pci_max_pasids(to_pci_dev(dev)) - 1,
5215 if (pasid == INVALID_IOASID) {
5216 pr_err("Can't allocate default pasid\n");
5219 domain->default_pasid = pasid;
5222 spin_lock_irqsave(&device_domain_lock, flags);
5224 * iommu->lock must be held to attach the domain to the iommu and to set
5225 * up the PASID entry for second level translation.
5227 spin_lock(&iommu->lock);
5228 ret = domain_attach_iommu(domain, iommu);
5232 /* Set up the PASID entry for mediated devices: */
5233 if (domain_use_first_level(domain))
5234 ret = domain_setup_first_level(iommu, domain, dev,
5235 domain->default_pasid);
5237 ret = intel_pasid_setup_second_level(iommu, domain, dev,
5238 domain->default_pasid);
5241 spin_unlock(&iommu->lock);
5243 auxiliary_link_device(domain, dev);
5245 spin_unlock_irqrestore(&device_domain_lock, flags);
5250 domain_detach_iommu(domain, iommu);
5252 spin_unlock(&iommu->lock);
5253 spin_unlock_irqrestore(&device_domain_lock, flags);
5254 if (!domain->auxd_refcnt && domain->default_pasid > 0)
5255 ioasid_free(domain->default_pasid);
5260 static void aux_domain_remove_dev(struct dmar_domain *domain,
5263 struct device_domain_info *info;
5264 struct intel_iommu *iommu;
5265 unsigned long flags;
5267 if (!is_aux_domain(dev, &domain->domain))
5270 spin_lock_irqsave(&device_domain_lock, flags);
5271 info = get_domain_info(dev);
5272 iommu = info->iommu;
5274 auxiliary_unlink_device(domain, dev);
5276 spin_lock(&iommu->lock);
5277 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
5278 domain_detach_iommu(domain, iommu);
5279 spin_unlock(&iommu->lock);
5281 spin_unlock_irqrestore(&device_domain_lock, flags);
5284 static int prepare_domain_attach_device(struct iommu_domain *domain,
5287 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5288 struct intel_iommu *iommu;
5291 iommu = device_to_iommu(dev, NULL, NULL);
5295 /* check if this iommu agaw is sufficient for max mapped address */
5296 addr_width = agaw_to_width(iommu->agaw);
5297 if (addr_width > cap_mgaw(iommu->cap))
5298 addr_width = cap_mgaw(iommu->cap);
5300 if (dmar_domain->max_addr > (1LL << addr_width)) {
5301 dev_err(dev, "%s: iommu width (%d) is not "
5302 "sufficient for the mapped address (%llx)\n",
5303 __func__, addr_width, dmar_domain->max_addr);
5306 dmar_domain->gaw = addr_width;
5309 * Knock out extra levels of page tables if necessary
5311 while (iommu->agaw < dmar_domain->agaw) {
5312 struct dma_pte *pte;
5314 pte = dmar_domain->pgd;
5315 if (dma_pte_present(pte)) {
5316 dmar_domain->pgd = (struct dma_pte *)
5317 phys_to_virt(dma_pte_addr(pte));
5318 free_pgtable_page(pte);
5320 dmar_domain->agaw--;
5326 static int intel_iommu_attach_device(struct iommu_domain *domain,
5331 if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
5332 device_is_rmrr_locked(dev)) {
5333 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
5337 if (is_aux_domain(dev, domain))
5340 /* normally dev is not mapped */
5341 if (unlikely(domain_context_mapped(dev))) {
5342 struct dmar_domain *old_domain;
5344 old_domain = find_domain(dev);
5346 dmar_remove_one_dev_info(dev);
5349 ret = prepare_domain_attach_device(domain, dev);
5353 return domain_add_dev_info(to_dmar_domain(domain), dev);
5356 static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
5361 if (!is_aux_domain(dev, domain))
5364 ret = prepare_domain_attach_device(domain, dev);
5368 return aux_domain_add_dev(to_dmar_domain(domain), dev);
5371 static void intel_iommu_detach_device(struct iommu_domain *domain,
5374 dmar_remove_one_dev_info(dev);
5377 static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
5380 aux_domain_remove_dev(to_dmar_domain(domain), dev);
5384 * 2D array for converting and sanitizing IOMMU generic TLB granularity to
5385 * VT-d granularity. Invalidation is typically included in the unmap operation
5386 * as a result of DMA or VFIO unmap. However, for assigned devices the guest
5387 * owns the first level page tables. Invalidations of translation caches in the
5388 * guest are trapped and passed down to the host.
5390 * vIOMMU in the guest will only expose first level page tables, therefore
5391 * we do not support IOTLB granularity for requests without PASID (second level).
5393 * For example, to find the VT-d granularity encoding for IOTLB
5394 * type and page selective granularity within PASID:
5395 * X: indexed by iommu cache type
5396 * Y: indexed by enum iommu_inv_granularity
5397 * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
5401 inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
5403 * PASID based IOTLB invalidation: PASID selective (per PASID),
5404 * page selective (address granularity)
5406 {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
5407 /* PASID based dev TLBs */
5408 {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
5410 {-EINVAL, -EINVAL, -EINVAL}
5413 static inline int to_vtd_granularity(int type, int granu)
5415 return inv_type_granu_table[type][granu];
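/*
 * For instance, per the table above, to_vtd_granularity() maps
 * (IOMMU_CACHE_INV_TYPE_IOTLB, IOMMU_INV_GRANU_ADDR) to QI_GRAN_PSI_PASID,
 * while (IOMMU_CACHE_INV_TYPE_DEV_IOTLB, IOMMU_INV_GRANU_PASID) yields
 * -EINVAL, which the invalidation path below rejects as an unsupported
 * cache type/granularity combination.
 */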
5418 static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
5420 u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
5422 /* VT-d size is encoded as 2^size of 4K pages: 0 for 4KiB, 9 for 2MiB, etc.
5423 * The IOMMU cache invalidate API passes granu_size in bytes and the number
5424 * of granules of that size in contiguous memory.
5426 return order_base_2(nr_pages);
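/*
 * Worked example: granu_size = 4KiB with nr_granules = 512 covers 2MiB,
 * so nr_pages = 512 and to_vtd_size() returns order_base_2(512) = 9 (the
 * 2MiB encoding); a single 4KiB granule returns 0.
 */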
5429 #ifdef CONFIG_INTEL_IOMMU_SVM
5431 intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
5432 struct iommu_cache_invalidate_info *inv_info)
5434 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5435 struct device_domain_info *info;
5436 struct intel_iommu *iommu;
5437 unsigned long flags;
5444 if (!inv_info || !dmar_domain)
5447 if (!dev || !dev_is_pci(dev))
5450 iommu = device_to_iommu(dev, &bus, &devfn);
5454 if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
5457 spin_lock_irqsave(&device_domain_lock, flags);
5458 spin_lock(&iommu->lock);
5459 info = get_domain_info(dev);
5464 did = dmar_domain->iommu_did[iommu->seq_id];
5465 sid = PCI_DEVID(bus, devfn);
5467 /* Size is only valid in address selective invalidation */
5468 if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
5469 size = to_vtd_size(inv_info->granu.addr_info.granule_size,
5470 inv_info->granu.addr_info.nb_granules);
5472 for_each_set_bit(cache_type,
5473 (unsigned long *)&inv_info->cache,
5474 IOMMU_CACHE_INV_TYPE_NR) {
5479 granu = to_vtd_granularity(cache_type, inv_info->granularity);
5480 if (granu == -EINVAL) {
5481 pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
5482 cache_type, inv_info->granularity);
5487 * PASID is stored in different locations based on the granularity.
5490 if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
5491 (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
5492 pasid = inv_info->granu.pasid_info.pasid;
5493 else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
5494 (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
5495 pasid = inv_info->granu.addr_info.pasid;
5497 switch (BIT(cache_type)) {
5498 case IOMMU_CACHE_INV_TYPE_IOTLB:
5499 /* HW will ignore LSB bits based on address mask */
5500 if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
5502 (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
5503 pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
5504 inv_info->granu.addr_info.addr, size);
5508 * If granu is PASID-selective, address is ignored.
5509 * We use npages = -1 to indicate that.
5511 qi_flush_piotlb(iommu, did, pasid,
5512 mm_to_dma_pfn(inv_info->granu.addr_info.addr),
5513 (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
5514 inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
5516 if (!info->ats_enabled)
5519 * Always flush device IOTLB if ATS is enabled. vIOMMU
5520 * in the guest may assume IOTLB flush is inclusive,
5521 * which is more efficient.
5524 case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
5526 * PASID based device TLB invalidation does not support
5527 * IOMMU_INV_GRANU_PASID granularity but only supports
5528 * IOMMU_INV_GRANU_ADDR.
5529 * The equivalent of that is to set the size to cover the
5530 * entire 64-bit address range. The user only provides PASID info
5531 * without address info, so we set addr to 0.
5533 if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
5534 size = 64 - VTD_PAGE_SHIFT;
5536 } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
5537 addr = inv_info->granu.addr_info.addr;
5540 if (info->ats_enabled)
5541 qi_flush_dev_iotlb_pasid(iommu, sid,
5543 info->ats_qdep, addr,
5546 pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
5549 dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
5555 spin_unlock(&iommu->lock);
5556 spin_unlock_irqrestore(&device_domain_lock, flags);
5562 static int intel_iommu_map(struct iommu_domain *domain,
5563 unsigned long iova, phys_addr_t hpa,
5564 size_t size, int iommu_prot, gfp_t gfp)
5566 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5571 if (iommu_prot & IOMMU_READ)
5572 prot |= DMA_PTE_READ;
5573 if (iommu_prot & IOMMU_WRITE)
5574 prot |= DMA_PTE_WRITE;
5575 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5576 prot |= DMA_PTE_SNP;
5578 max_addr = iova + size;
5579 if (dmar_domain->max_addr < max_addr) {
5582 /* check if minimum agaw is sufficient for mapped address */
5583 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5584 if (end < max_addr) {
5585 pr_err("%s: iommu width (%d) is not "
5586 "sufficient for the mapped address (%llx)\n",
5587 __func__, dmar_domain->gaw, max_addr);
5590 dmar_domain->max_addr = max_addr;
5592 /* Round up size to next multiple of PAGE_SIZE, if it and
5593 the low bits of hpa would take us onto the next page */
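/*
 * For example (hypothetical values): hpa = 0x1ff0 with size = 0x20
 * crosses a page boundary, so aligned_nrpages() below returns 2 pages
 * even though the length alone would round up to a single page.
 */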
5594 size = aligned_nrpages(hpa, size);
5595 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5596 hpa >> VTD_PAGE_SHIFT, size, prot);
5600 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5601 unsigned long iova, size_t size,
5602 struct iommu_iotlb_gather *gather)
5604 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5605 struct page *freelist = NULL;
5606 unsigned long start_pfn, last_pfn;
5607 unsigned int npages;
5608 int iommu_id, level = 0;
5610 /* Cope with horrid API which requires us to unmap more than the
5611 size argument if it happens to be a large-page mapping. */
5612 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5614 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5615 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
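/*
 * Example: if the IOVA falls inside a 2MiB superpage, pfn_to_dma_pte()
 * reports level 2, so the check above rounds even a 4KiB unmap request
 * up to the full 2MiB and the IOTLB flush below covers the whole mapping.
 */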
5617 start_pfn = iova >> VTD_PAGE_SHIFT;
5618 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5620 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5622 npages = last_pfn - start_pfn + 1;
5624 for_each_domain_iommu(iommu_id, dmar_domain)
5625 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5626 start_pfn, npages, !freelist, 0);
5628 dma_free_pagelist(freelist);
5630 if (dmar_domain->max_addr == iova + size)
5631 dmar_domain->max_addr = iova;
5636 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5639 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5640 struct dma_pte *pte;
5644 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5645 if (pte && dma_pte_present(pte))
5646 phys = dma_pte_addr(pte) +
5647 (iova & (BIT_MASK(level_to_offset_bits(level) +
5648 VTD_PAGE_SHIFT) - 1));
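/*
 * Example: for a 4KiB mapping the walk stops at level 1 and the result is
 * dma_pte_addr(pte) plus the low 12 bits of the IOVA; for a 2MiB superpage
 * (level 2) the low 21 bits of the IOVA are preserved instead.
 */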
5653 static inline bool scalable_mode_support(void)
5655 struct dmar_drhd_unit *drhd;
5656 struct intel_iommu *iommu;
5660 for_each_active_iommu(iommu, drhd) {
5661 if (!sm_supported(iommu)) {
5671 static inline bool iommu_pasid_support(void)
5673 struct dmar_drhd_unit *drhd;
5674 struct intel_iommu *iommu;
5678 for_each_active_iommu(iommu, drhd) {
5679 if (!pasid_supported(iommu)) {
5689 static inline bool nested_mode_support(void)
5691 struct dmar_drhd_unit *drhd;
5692 struct intel_iommu *iommu;
5696 for_each_active_iommu(iommu, drhd) {
5697 if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
5707 static bool intel_iommu_capable(enum iommu_cap cap)
5709 if (cap == IOMMU_CAP_CACHE_COHERENCY)
5710 return domain_update_iommu_snooping(NULL) == 1;
5711 if (cap == IOMMU_CAP_INTR_REMAP)
5712 return irq_remapping_enabled == 1;
5717 static struct iommu_device *intel_iommu_probe_device(struct device *dev)
5719 struct intel_iommu *iommu;
5721 iommu = device_to_iommu(dev, NULL, NULL);
5723 return ERR_PTR(-ENODEV);
5725 if (translation_pre_enabled(iommu))
5726 dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
5728 return &iommu->iommu;
5731 static void intel_iommu_release_device(struct device *dev)
5733 struct intel_iommu *iommu;
5735 iommu = device_to_iommu(dev, NULL, NULL);
5739 dmar_remove_one_dev_info(dev);
5741 set_dma_ops(dev, NULL);
5744 static void intel_iommu_probe_finalize(struct device *dev)
5746 struct iommu_domain *domain;
5748 domain = iommu_get_domain_for_dev(dev);
5749 if (device_needs_bounce(dev))
5750 set_dma_ops(dev, &bounce_dma_ops);
5751 else if (domain && domain->type == IOMMU_DOMAIN_DMA)
5752 set_dma_ops(dev, &intel_dma_ops);
5754 set_dma_ops(dev, NULL);
5757 static void intel_iommu_get_resv_regions(struct device *device,
5758 struct list_head *head)
5760 int prot = DMA_PTE_READ | DMA_PTE_WRITE;
5761 struct iommu_resv_region *reg;
5762 struct dmar_rmrr_unit *rmrr;
5763 struct device *i_dev;
5766 down_read(&dmar_global_lock);
5767 for_each_rmrr_units(rmrr) {
5768 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5770 struct iommu_resv_region *resv;
5771 enum iommu_resv_type type;
5774 if (i_dev != device &&
5775 !is_downstream_to_pci_bridge(device, i_dev))
5778 length = rmrr->end_address - rmrr->base_address + 1;
5780 type = device_rmrr_is_relaxable(device) ?
5781 IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
5783 resv = iommu_alloc_resv_region(rmrr->base_address,
5784 length, prot, type);
5788 list_add_tail(&resv->list, head);
5791 up_read(&dmar_global_lock);
5793 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
5794 if (dev_is_pci(device)) {
5795 struct pci_dev *pdev = to_pci_dev(device);
5797 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
5798 reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
5799 IOMMU_RESV_DIRECT_RELAXABLE);
5801 list_add_tail(&reg->list, head);
5804 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
5806 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5807 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5811 list_add_tail(&reg->list, head);
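/*
 * The regions collected here are what userspace sees in each group's
 * reserved_regions file; on a typical system this looks roughly like
 * (RMRR addresses are illustrative, the MSI range matches IOAPIC_RANGE_*):
 *
 *	# cat /sys/kernel/iommu_groups/0/reserved_regions
 *	0x000000007c000000 0x000000007e7fffff direct
 *	0x00000000fee00000 0x00000000feefffff msi
 */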
5814 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
5816 struct device_domain_info *info;
5817 struct context_entry *context;
5818 struct dmar_domain *domain;
5819 unsigned long flags;
5823 domain = find_domain(dev);
5827 spin_lock_irqsave(&device_domain_lock, flags);
5828 spin_lock(&iommu->lock);
5831 info = get_domain_info(dev);
5832 if (!info || !info->pasid_supported)
5835 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5836 if (WARN_ON(!context))
5839 ctx_lo = context[0].lo;
5841 if (!(ctx_lo & CONTEXT_PASIDE)) {
5842 ctx_lo |= CONTEXT_PASIDE;
5843 context[0].lo = ctx_lo;
5845 iommu->flush.flush_context(iommu,
5846 domain->iommu_did[iommu->seq_id],
5847 PCI_DEVID(info->bus, info->devfn),
5848 DMA_CCMD_MASK_NOBIT,
5849 DMA_CCMD_DEVICE_INVL);
5852 /* Enable PASID support in the device, if it wasn't already */
5853 if (!info->pasid_enabled)
5854 iommu_enable_dev_iotlb(info);
5859 spin_unlock(&iommu->lock);
5860 spin_unlock_irqrestore(&device_domain_lock, flags);
5865 static void intel_iommu_apply_resv_region(struct device *dev,
5866 struct iommu_domain *domain,
5867 struct iommu_resv_region *region)
5869 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5870 unsigned long start, end;
5872 start = IOVA_PFN(region->start);
5873 end = IOVA_PFN(region->start + region->length - 1);
5875 WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
5878 static struct iommu_group *intel_iommu_device_group(struct device *dev)
5880 if (dev_is_pci(dev))
5881 return pci_device_group(dev);
5882 return generic_device_group(dev);
5885 static int intel_iommu_enable_auxd(struct device *dev)
5887 struct device_domain_info *info;
5888 struct intel_iommu *iommu;
5889 unsigned long flags;
5892 iommu = device_to_iommu(dev, NULL, NULL);
5893 if (!iommu || dmar_disabled)
5896 if (!sm_supported(iommu) || !pasid_supported(iommu))
5899 ret = intel_iommu_enable_pasid(iommu, dev);
5903 spin_lock_irqsave(&device_domain_lock, flags);
5904 info = get_domain_info(dev);
5905 info->auxd_enabled = 1;
5906 spin_unlock_irqrestore(&device_domain_lock, flags);
5911 static int intel_iommu_disable_auxd(struct device *dev)
5913 struct device_domain_info *info;
5914 unsigned long flags;
5916 spin_lock_irqsave(&device_domain_lock, flags);
5917 info = get_domain_info(dev);
5918 if (!WARN_ON(!info))
5919 info->auxd_enabled = 0;
5920 spin_unlock_irqrestore(&device_domain_lock, flags);
5926 * A PCI express designated vendor specific extended capability is defined
5927 * in section 3.7 of the Intel Scalable I/O Virtualization technical spec
5928 * for system software and tools to detect endpoint devices supporting the
5929 * Intel scalable IO virtualization without host driver dependency.
5931 * Returns the address of the matching extended capability structure within
5932 * the device's PCI configuration space or 0 if the device does not support it.
5935 static int siov_find_pci_dvsec(struct pci_dev *pdev)
5940 pos = pci_find_next_ext_capability(pdev, 0, 0x23);
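/*
 * Layout being matched here, per the PCIe DVSEC definition (offsets are
 * relative to the extended capability position found above):
 *
 *	+0x0: extended capability header (ID 0x23 = DVSEC)
 *	+0x4: DVSEC header 1, bits 15:0 = vendor ID (PCI_VENDOR_ID_INTEL)
 *	+0x8: DVSEC header 2, bits 15:0 = DVSEC ID (5 for scalable IOV)
 */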
5942 pci_read_config_word(pdev, pos + 4, &vendor);
5943 pci_read_config_word(pdev, pos + 8, &id);
5944 if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
5947 pos = pci_find_next_ext_capability(pdev, pos, 0x23);
5954 intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
5956 if (feat == IOMMU_DEV_FEAT_AUX) {
5959 if (!dev_is_pci(dev) || dmar_disabled ||
5960 !scalable_mode_support() || !iommu_pasid_support())
5963 ret = pci_pasid_features(to_pci_dev(dev));
5967 return !!siov_find_pci_dvsec(to_pci_dev(dev));
5970 if (feat == IOMMU_DEV_FEAT_SVA) {
5971 struct device_domain_info *info = get_domain_info(dev);
5973 return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
5974 info->pasid_supported && info->pri_supported &&
5975 info->ats_supported;
5982 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
5984 if (feat == IOMMU_DEV_FEAT_AUX)
5985 return intel_iommu_enable_auxd(dev);
5987 if (feat == IOMMU_DEV_FEAT_SVA) {
5988 struct device_domain_info *info = get_domain_info(dev);
5993 if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
6001 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
6003 if (feat == IOMMU_DEV_FEAT_AUX)
6004 return intel_iommu_disable_auxd(dev);
6010 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
6012 struct device_domain_info *info = get_domain_info(dev);
6014 if (feat == IOMMU_DEV_FEAT_AUX)
6015 return scalable_mode_support() && info && info->auxd_enabled;
6021 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
6023 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
6025 return dmar_domain->default_pasid > 0 ?
6026 dmar_domain->default_pasid : -EINVAL;
6029 static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
6032 return attach_deferred(dev);
6036 intel_iommu_domain_set_attr(struct iommu_domain *domain,
6037 enum iommu_attr attr, void *data)
6039 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
6040 unsigned long flags;
6043 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
6047 case DOMAIN_ATTR_NESTING:
6048 spin_lock_irqsave(&device_domain_lock, flags);
6049 if (nested_mode_support() &&
6050 list_empty(&dmar_domain->devices)) {
6051 dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
6052 dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
6056 spin_unlock_irqrestore(&device_domain_lock, flags);
6067 * Check that the device does not live on an external-facing PCI port that is
6068 * marked as untrusted. Such devices should not be able to apply quirks and
6069 * thus not be able to bypass the IOMMU restrictions.
6071 static bool risky_device(struct pci_dev *pdev)
6073 if (pdev->untrusted) {
6075 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
6076 pdev->vendor, pdev->device);
6077 pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
6083 const struct iommu_ops intel_iommu_ops = {
6084 .capable = intel_iommu_capable,
6085 .domain_alloc = intel_iommu_domain_alloc,
6086 .domain_free = intel_iommu_domain_free,
6087 .domain_set_attr = intel_iommu_domain_set_attr,
6088 .attach_dev = intel_iommu_attach_device,
6089 .detach_dev = intel_iommu_detach_device,
6090 .aux_attach_dev = intel_iommu_aux_attach_device,
6091 .aux_detach_dev = intel_iommu_aux_detach_device,
6092 .aux_get_pasid = intel_iommu_aux_get_pasid,
6093 .map = intel_iommu_map,
6094 .unmap = intel_iommu_unmap,
6095 .iova_to_phys = intel_iommu_iova_to_phys,
6096 .probe_device = intel_iommu_probe_device,
6097 .probe_finalize = intel_iommu_probe_finalize,
6098 .release_device = intel_iommu_release_device,
6099 .get_resv_regions = intel_iommu_get_resv_regions,
6100 .put_resv_regions = generic_iommu_put_resv_regions,
6101 .apply_resv_region = intel_iommu_apply_resv_region,
6102 .device_group = intel_iommu_device_group,
6103 .dev_has_feat = intel_iommu_dev_has_feat,
6104 .dev_feat_enabled = intel_iommu_dev_feat_enabled,
6105 .dev_enable_feat = intel_iommu_dev_enable_feat,
6106 .dev_disable_feat = intel_iommu_dev_disable_feat,
6107 .is_attach_deferred = intel_iommu_is_attach_deferred,
6108 .def_domain_type = device_def_domain_type,
6109 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
6110 #ifdef CONFIG_INTEL_IOMMU_SVM
6111 .cache_invalidate = intel_iommu_sva_invalidate,
6112 .sva_bind_gpasid = intel_svm_bind_gpasid,
6113 .sva_unbind_gpasid = intel_svm_unbind_gpasid,
6114 .sva_bind = intel_svm_bind,
6115 .sva_unbind = intel_svm_unbind,
6116 .sva_get_pasid = intel_svm_get_pasid,
6117 .page_response = intel_svm_page_response,
6121 static void quirk_iommu_igfx(struct pci_dev *dev)
6123 if (risky_device(dev))
6126 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
6130 /* G4x/GM45 integrated gfx dmar support is totally busted. */
6131 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
6132 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
6133 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
6134 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
6135 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
6136 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
6137 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
6139 /* Broadwell igfx malfunctions with dmar */
6140 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
6141 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
6142 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
6143 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
6144 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
6145 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
6146 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
6147 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
6148 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
6149 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
6150 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
6151 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
6152 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
6153 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
6154 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
6155 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
6156 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
6157 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
6158 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
6159 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
6160 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
6161 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
6162 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
6163 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
6165 static void quirk_iommu_rwbf(struct pci_dev *dev)
6167 if (risky_device(dev))
6171 * Mobile 4 Series Chipset neglects to set RWBF capability,
6172 * but needs it. Same seems to hold for the desktop versions.
6174 pci_info(dev, "Forcing write-buffer flush capability\n");
6178 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
6179 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
6180 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
6181 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
6182 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
6183 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
6184 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
6187 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
6188 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
6189 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
6190 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
6191 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
6192 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
6193 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
6194 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
6196 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
6200 if (risky_device(dev))
6203 if (pci_read_config_word(dev, GGC, &ggc))
6206 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
6207 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
6209 } else if (dmar_map_gfx) {
6210 /* we have to ensure the gfx device is idle before we flush */
6211 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
6212 intel_iommu_strict = 1;
6215 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
6216 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
6217 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
6218 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
6220 static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
6224 if (!IS_GFX_DEVICE(dev))
6227 ver = (dev->device >> 8) & 0xff;
6228 if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
6229 ver != 0x4e && ver != 0x8a && ver != 0x98 &&
6233 if (risky_device(dev))
6236 pci_info(dev, "Skip IOMMU disabling for graphics\n");
6237 iommu_skip_te_disable = 1;
6239 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
6241 /* On Tylersburg chipsets, some BIOSes have been known to enable the
6242 ISOCH DMAR unit for the Azalia sound device, but not give it any
6243 TLB entries, which causes it to deadlock. Check for that. We do
6244 this in a function called from init_dmars(), instead of in a PCI
6245 quirk, because we don't want to print the obnoxious "BIOS broken"
6246 message if VT-d is actually disabled.
6248 static void __init check_tylersburg_isoch(void)
6250 struct pci_dev *pdev;
6251 uint32_t vtisochctrl;
6253 /* If there's no Azalia in the system anyway, forget it. */
6254 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
6258 if (risky_device(pdev)) {
6265 /* System Management Registers. Might be hidden, in which case
6266 we can't do the sanity check. But that's OK, because the
6267 known-broken BIOSes _don't_ actually hide it, so far. */
6268 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
6272 if (risky_device(pdev)) {
6277 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
6284 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
6285 if (vtisochctrl & 1)
6288 /* Drop all bits other than the number of TLB entries */
6289 vtisochctrl &= 0x1c;
6291 /* If we have the recommended number of TLB entries (16), fine. */
6292 if (vtisochctrl == 0x10)
6295 /* Zero TLB entries? You get to ride the short bus to school. */
6297 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
6298 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
6299 dmi_get_system_info(DMI_BIOS_VENDOR),
6300 dmi_get_system_info(DMI_BIOS_VERSION),
6301 dmi_get_system_info(DMI_PRODUCT_VERSION));
6302 iommu_identity_mapping |= IDENTMAP_AZALIA;
6306 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",