2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
18 * Joerg Roedel <jroedel@suse.de>
21 #define pr_fmt(fmt) "DMAR: " fmt
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/cpu.h>
37 #include <linux/timer.h>
39 #include <linux/iova.h>
40 #include <linux/iommu.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/tboot.h>
44 #include <linux/dmi.h>
45 #include <linux/pci-ats.h>
46 #include <linux/memblock.h>
47 #include <linux/dma-contiguous.h>
48 #include <linux/crash_dump.h>
49 #include <asm/irq_remapping.h>
50 #include <asm/cacheflush.h>
51 #include <asm/iommu.h>
53 #include "irq_remapping.h"
55 #define ROOT_SIZE VTD_PAGE_SIZE
56 #define CONTEXT_SIZE VTD_PAGE_SIZE
58 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
59 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
60 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
61 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
63 #define IOAPIC_RANGE_START (0xfee00000)
64 #define IOAPIC_RANGE_END (0xfeefffff)
65 #define IOVA_START_ADDR (0x1000)
67 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
69 #define MAX_AGAW_WIDTH 64
70 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
72 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
73 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
75 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
76 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
77 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
78 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
79 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
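/*
 * Illustrative arithmetic (not part of the original source): with the
 * default DEFAULT_DOMAIN_ADDRESS_WIDTH of 48 and VTD_PAGE_SHIFT of 12,
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1, i.e. up to 2^36 4KiB page
 * frames, and DOMAIN_MAX_ADDR(48) covers the full 256TiB guest address
 * space. The min_t() in DOMAIN_MAX_PFN() only clamps the value on 32-bit
 * builds, where (unsigned long)-1 is smaller than 2^36 - 1.
 */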
81 /* IO virtual address start page frame number */
82 #define IOVA_START_PFN (1)
84 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
85 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
86 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
88 /* page table handling */
89 #define LEVEL_STRIDE (9)
90 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
93 * This bitmap is used to advertise the page sizes our hardware supports
94 * to the IOMMU core, which will then use this information to split
95 * physically contiguous memory regions it is mapping into page sizes
98 * Traditionally the IOMMU core just handed us the mappings directly,
99 * after making sure the size is an order of a 4KiB page and that the
100 * mapping has natural alignment.
102 * To retain this behavior, we currently advertise that we support
103 * all page sizes that are an order of 4KiB.
105 * If at some point we'd like to utilize the IOMMU core's new behavior,
106 * we could change this to advertise the real page sizes we support.
108 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
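/*
 * Example reading of the bitmap above (illustrative sketch, not from the
 * original source): ~0xFFFUL sets every bit from bit 12 upwards, so the
 * IOMMU core is told that any power-of-two size >= 4KiB (4KiB, 8KiB,
 * 16KiB, ...) is acceptable, which preserves the old "order of 4KiB"
 * behaviour. Advertising only the real hardware sizes would instead look
 * like 0x40201000UL (4KiB | 2MiB | 1GiB), assuming 2MiB and 1GiB
 * superpage support.
 */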
110 static inline int agaw_to_level(int agaw)
115 static inline int agaw_to_width(int agaw)
117 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
120 static inline int width_to_agaw(int width)
122 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
125 static inline unsigned int level_to_offset_bits(int level)
127 return (level - 1) * LEVEL_STRIDE;
130 static inline int pfn_level_offset(unsigned long pfn, int level)
132 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
135 static inline unsigned long level_mask(int level)
137 return -1UL << level_to_offset_bits(level);
140 static inline unsigned long level_size(int level)
142 return 1UL << level_to_offset_bits(level);
145 static inline unsigned long align_to_level(unsigned long pfn, int level)
147 return (pfn + level_size(level) - 1) & level_mask(level);
150 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
152 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
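/*
 * Worked example for the helpers above (illustrative, assuming the usual
 * "level = agaw + 2" mapping in the elided agaw_to_level() body): a 48-bit
 * adjusted guest address width gives width_to_agaw(48) == 2,
 * agaw_to_level(2) == 4 (a 4-level page table) and agaw_to_width(2) == 48
 * again. At level 2, level_to_offset_bits() == 9, so level_size(2) == 512
 * pages and lvl_to_nr_pages(2) == 512, i.e. one 2MiB superpage.
 */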
155 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
156 are never going to work. */
157 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
159 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
162 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
164 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
166 static inline unsigned long page_to_dma_pfn(struct page *pg)
168 return mm_to_dma_pfn(page_to_pfn(pg));
170 static inline unsigned long virt_to_dma_pfn(void *p)
172 return page_to_dma_pfn(virt_to_page(p));
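/*
 * Note (illustrative): on x86 PAGE_SHIFT and VTD_PAGE_SHIFT are both 12,
 * so mm_to_dma_pfn() and dma_to_mm_pfn() are identity conversions. The
 * shifts only matter on configurations where the MM page is larger than
 * the 4KiB VT-d page, in which case one MM pfn spans several DMA pfns.
 */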
175 /* global iommu list, set NULL for ignored DMAR units */
176 static struct intel_iommu **g_iommus;
178 static void __init check_tylersburg_isoch(void);
179 static int rwbf_quirk;
182 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
183 * (used when the kernel is launched with TXT)
185 static int force_on = 0;
186 int intel_iommu_tboot_noforce;
191 * 12-63: Context Ptr (12 - (haw-1))
198 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
201 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
204 static phys_addr_t root_entry_lctp(struct root_entry *re)
209 return re->lo & VTD_PAGE_MASK;
213 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
216 static phys_addr_t root_entry_uctp(struct root_entry *re)
221 return re->hi & VTD_PAGE_MASK;
226 * 1: fault processing disable
227 * 2-3: translation type
228 * 12-63: address space root
234 struct context_entry {
239 static inline void context_clear_pasid_enable(struct context_entry *context)
241 context->lo &= ~(1ULL << 11);
244 static inline bool context_pasid_enabled(struct context_entry *context)
246 return !!(context->lo & (1ULL << 11));
249 static inline void context_set_copied(struct context_entry *context)
251 context->hi |= (1ull << 3);
254 static inline bool context_copied(struct context_entry *context)
256 return !!(context->hi & (1ULL << 3));
259 static inline bool __context_present(struct context_entry *context)
261 return (context->lo & 1);
264 static inline bool context_present(struct context_entry *context)
266 return context_pasid_enabled(context) ?
267 __context_present(context) :
268 __context_present(context) && !context_copied(context);
271 static inline void context_set_present(struct context_entry *context)
276 static inline void context_set_fault_enable(struct context_entry *context)
278 context->lo &= (((u64)-1) << 2) | 1;
281 static inline void context_set_translation_type(struct context_entry *context,
284 context->lo &= (((u64)-1) << 4) | 3;
285 context->lo |= (value & 3) << 2;
288 static inline void context_set_address_root(struct context_entry *context,
291 context->lo &= ~VTD_PAGE_MASK;
292 context->lo |= value & VTD_PAGE_MASK;
295 static inline void context_set_address_width(struct context_entry *context,
298 context->hi |= value & 7;
301 static inline void context_set_domain_id(struct context_entry *context,
304 context->hi |= (value & ((1 << 16) - 1)) << 8;
307 static inline int context_domain_id(struct context_entry *c)
309 return((c->hi >> 8) & 0xffff);
312 static inline void context_clear_entry(struct context_entry *context)
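/*
 * Typical use of the accessors above (sketch based on
 * domain_context_mapping_one() later in this file): a present entry is
 * built roughly as
 *
 *	context_clear_entry(context);
 *	context_set_domain_id(context, did);
 *	context_set_address_root(context, virt_to_phys(pgd));
 *	context_set_address_width(context, iommu->agaw);
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 *
 * i.e. the low word carries present/FPD/translation-type/ASR and the high
 * word carries the address width and the 16-bit domain id.
 */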
325 * 12-63: Host physical address
331 static inline void dma_clear_pte(struct dma_pte *pte)
336 static inline u64 dma_pte_addr(struct dma_pte *pte)
339 return pte->val & VTD_PAGE_MASK;
341 /* Must have a full atomic 64-bit read */
342 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
346 static inline bool dma_pte_present(struct dma_pte *pte)
348 return (pte->val & 3) != 0;
351 static inline bool dma_pte_superpage(struct dma_pte *pte)
353 return (pte->val & DMA_PTE_LARGE_PAGE);
356 static inline int first_pte_in_page(struct dma_pte *pte)
358 return !((unsigned long)pte & ~VTD_PAGE_MASK);
362 * This domain is a static identity mapping domain.
363 * 1. This domain creates a static 1:1 mapping to all usable memory.
364 * 2. It maps to each iommu if successful.
365 * 3. Each iommu maps to this domain if successful.
367 static struct dmar_domain *si_domain;
368 static int hw_pass_through = 1;
371 * Domain represents a virtual machine; more than one device
372 * across iommus may be owned by one domain, e.g. a kvm guest.
374 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
376 /* si_domain contains multiple devices */
377 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
379 #define for_each_domain_iommu(idx, domain) \
380 for (idx = 0; idx < g_num_of_iommus; idx++) \
381 if (domain->iommu_refcnt[idx])
384 int nid; /* node id */
386 unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
387 /* Refcount of devices per iommu */
390 u16 iommu_did[DMAR_UNITS_SUPPORTED];
391 /* Domain ids per IOMMU. Use u16 since
392 * domain ids are 16 bit wide according
393 * to VT-d spec, section 9.3 */
395 bool has_iotlb_device;
396 struct list_head devices; /* all devices' list */
397 struct iova_domain iovad; /* iova's that belong to this domain */
399 struct dma_pte *pgd; /* virtual address */
400 int gaw; /* max guest address width */
402 /* adjusted guest address width, 0 is level 2 30-bit */
405 int flags; /* flags to find out type of domain */
407 int iommu_coherency;/* indicate coherency of iommu access */
408 int iommu_snooping; /* indicate snooping control feature*/
409 int iommu_count; /* reference count of iommu */
410 int iommu_superpage;/* Level of superpages supported:
411 0 == 4KiB (no superpages), 1 == 2MiB,
412 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
413 u64 max_addr; /* maximum mapped address */
415 struct iommu_domain domain; /* generic domain data structure for
419 /* PCI domain-device relationship */
420 struct device_domain_info {
421 struct list_head link; /* link to domain siblings */
422 struct list_head global; /* link to global list */
423 u8 bus; /* PCI bus number */
424 u8 devfn; /* PCI devfn number */
425 u8 pasid_supported:3;
432 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
433 struct intel_iommu *iommu; /* IOMMU used by this device */
434 struct dmar_domain *domain; /* pointer to domain */
437 struct dmar_rmrr_unit {
438 struct list_head list; /* list of rmrr units */
439 struct acpi_dmar_header *hdr; /* ACPI header */
440 u64 base_address; /* reserved base address*/
441 u64 end_address; /* reserved end address */
442 struct dmar_dev_scope *devices; /* target devices */
443 int devices_cnt; /* target device count */
444 struct iommu_resv_region *resv; /* reserved region handle */
447 struct dmar_atsr_unit {
448 struct list_head list; /* list of ATSR units */
449 struct acpi_dmar_header *hdr; /* ACPI header */
450 struct dmar_dev_scope *devices; /* target devices */
451 int devices_cnt; /* target device count */
452 u8 include_all:1; /* include all ports */
455 static LIST_HEAD(dmar_atsr_units);
456 static LIST_HEAD(dmar_rmrr_units);
458 #define for_each_rmrr_units(rmrr) \
459 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
461 /* number of IOMMUs; used to size arrays and bitmaps indexed by iommu seq_id */
462 static int g_num_of_iommus;
464 static void domain_exit(struct dmar_domain *domain);
465 static void domain_remove_dev_info(struct dmar_domain *domain);
466 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
468 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
469 static void domain_context_clear(struct intel_iommu *iommu,
471 static int domain_detach_iommu(struct dmar_domain *domain,
472 struct intel_iommu *iommu);
474 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
475 int dmar_disabled = 0;
477 int dmar_disabled = 1;
478 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
480 int intel_iommu_enabled = 0;
481 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
483 static int dmar_map_gfx = 1;
484 static int dmar_forcedac;
485 static int intel_iommu_strict;
486 static int intel_iommu_superpage = 1;
487 static int intel_iommu_ecs = 1;
488 static int intel_iommu_pasid28;
489 static int iommu_identity_mapping;
491 #define IDENTMAP_ALL 1
492 #define IDENTMAP_GFX 2
493 #define IDENTMAP_AZALIA 4
495 /* Broadwell and Skylake have broken ECS support — normal so-called "second
496 * level" translation of DMA requests-without-PASID doesn't actually happen
497 * unless you also set the NESTE bit in an extended context-entry. Which of
498 * course means that SVM doesn't work because it's trying to do nested
499 * translation of the physical addresses it finds in the process page tables,
500 * through the IOVA->phys mapping found in the "second level" page tables.
502 * The VT-d specification was retroactively changed to redefine the meaning
503 * of the capability bits and pretend that Broadwell/Skylake never happened...
504 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
505 * for some reason it was the PASID capability bit which was redefined (from
506 * bit 28 on BDW/SKL to bit 40 in future).
508 * So our test for ECS needs to eschew those implementations which set the old
509 * PASID capability bit 28, since those are the ones on which ECS is broken.
510 * Unless we are working around the 'pasid28' limitations, that is, by putting
511 * the device into passthrough mode for normal DMA and thus masking the bug.
513 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
514 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
515 /* PASID support is thus enabled if ECS is enabled and *either* of the old
516 * or new capability bits is set. */
517 #define pasid_enabled(iommu) (ecs_enabled(iommu) && \
518 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
520 int intel_iommu_gfx_mapped;
521 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
523 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
524 static DEFINE_SPINLOCK(device_domain_lock);
525 static LIST_HEAD(device_domain_list);
527 const struct iommu_ops intel_iommu_ops;
529 static bool translation_pre_enabled(struct intel_iommu *iommu)
531 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
534 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
536 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
539 static void init_translation_status(struct intel_iommu *iommu)
543 gsts = readl(iommu->reg + DMAR_GSTS_REG);
544 if (gsts & DMA_GSTS_TES)
545 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
548 /* Convert generic 'struct iommu_domain' to private struct dmar_domain */
549 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
551 return container_of(dom, struct dmar_domain, domain);
554 static int __init intel_iommu_setup(char *str)
559 if (!strncmp(str, "on", 2)) {
561 pr_info("IOMMU enabled\n");
562 } else if (!strncmp(str, "off", 3)) {
564 pr_info("IOMMU disabled\n");
565 } else if (!strncmp(str, "igfx_off", 8)) {
567 pr_info("Disable GFX device mapping\n");
568 } else if (!strncmp(str, "forcedac", 8)) {
569 pr_info("Forcing DAC for PCI devices\n");
571 } else if (!strncmp(str, "strict", 6)) {
572 pr_info("Disable batched IOTLB flush\n");
573 intel_iommu_strict = 1;
574 } else if (!strncmp(str, "sp_off", 6)) {
575 pr_info("Disable supported super page\n");
576 intel_iommu_superpage = 0;
577 } else if (!strncmp(str, "ecs_off", 7)) {
579 "Intel-IOMMU: disable extended context table support\n");
581 } else if (!strncmp(str, "pasid28", 7)) {
583 "Intel-IOMMU: enable pre-production PASID support\n");
584 intel_iommu_pasid28 = 1;
585 iommu_identity_mapping |= IDENTMAP_GFX;
586 } else if (!strncmp(str, "tboot_noforce", 13)) {
588 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
589 intel_iommu_tboot_noforce = 1;
592 str += strcspn(str, ",");
598 __setup("intel_iommu=", intel_iommu_setup);
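/*
 * Usage example (illustrative): the options above are comma separated on
 * the kernel command line, so e.g.
 *
 *	intel_iommu=on,strict,sp_off
 *
 * enables the IOMMU, disables batched IOTLB flushing and disables
 * superpage support in one go; the strcspn() step above advances to the
 * next token on each loop iteration.
 */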
600 static struct kmem_cache *iommu_domain_cache;
601 static struct kmem_cache *iommu_devinfo_cache;
603 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
605 struct dmar_domain **domains;
608 domains = iommu->domains[idx];
612 return domains[did & 0xff];
615 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
616 struct dmar_domain *domain)
618 struct dmar_domain **domains;
621 if (!iommu->domains[idx]) {
622 size_t size = 256 * sizeof(struct dmar_domain *);
623 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
626 domains = iommu->domains[idx];
627 if (WARN_ON(!domains))
630 domains[did & 0xff] = domain;
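/*
 * Layout note (illustrative, assuming idx == did >> 8 as in the elided
 * lines above): iommu->domains is a two-level table indexed by domain id,
 * with the top level selecting a lazily allocated page of 256 pointers and
 * did & 0xff selecting the slot within it, so e.g. did 0x0134 lands in
 * iommu->domains[1][0x34]. This keeps the footprint small when only a few
 * of the up to 65536 domain ids are in use.
 */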
633 static inline void *alloc_pgtable_page(int node)
638 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
640 vaddr = page_address(page);
644 static inline void free_pgtable_page(void *vaddr)
646 free_page((unsigned long)vaddr);
649 static inline void *alloc_domain_mem(void)
651 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
654 static void free_domain_mem(void *vaddr)
656 kmem_cache_free(iommu_domain_cache, vaddr);
659 static inline void * alloc_devinfo_mem(void)
661 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
664 static inline void free_devinfo_mem(void *vaddr)
666 kmem_cache_free(iommu_devinfo_cache, vaddr);
669 static inline int domain_type_is_vm(struct dmar_domain *domain)
671 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
674 static inline int domain_type_is_si(struct dmar_domain *domain)
676 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
679 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
681 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
682 DOMAIN_FLAG_STATIC_IDENTITY);
685 static inline int domain_pfn_supported(struct dmar_domain *domain,
688 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
690 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
693 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
698 sagaw = cap_sagaw(iommu->cap);
699 for (agaw = width_to_agaw(max_gaw);
701 if (test_bit(agaw, &sagaw))
709 * Calculate max SAGAW for each iommu.
711 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
713 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
717 * Calculate agaw for each iommu.
718 * "SAGAW" may be different across iommus; use a default agaw, and
719 * fall back to a smaller supported agaw for iommus that don't support the default.
721 int iommu_calculate_agaw(struct intel_iommu *iommu)
723 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
726 /* This function only returns a single iommu in a domain */
727 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
731 /* si_domain and vm domain should not get here. */
732 BUG_ON(domain_type_is_vm_or_si(domain));
733 for_each_domain_iommu(iommu_id, domain)
736 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
739 return g_iommus[iommu_id];
742 static void domain_update_iommu_coherency(struct dmar_domain *domain)
744 struct dmar_drhd_unit *drhd;
745 struct intel_iommu *iommu;
749 domain->iommu_coherency = 1;
751 for_each_domain_iommu(i, domain) {
753 if (!ecap_coherent(g_iommus[i]->ecap)) {
754 domain->iommu_coherency = 0;
761 /* No hardware attached; use lowest common denominator */
763 for_each_active_iommu(iommu, drhd) {
764 if (!ecap_coherent(iommu->ecap)) {
765 domain->iommu_coherency = 0;
772 static int domain_update_iommu_snooping(struct intel_iommu *skip)
774 struct dmar_drhd_unit *drhd;
775 struct intel_iommu *iommu;
779 for_each_active_iommu(iommu, drhd) {
781 if (!ecap_sc_support(iommu->ecap)) {
792 static int domain_update_iommu_superpage(struct intel_iommu *skip)
794 struct dmar_drhd_unit *drhd;
795 struct intel_iommu *iommu;
798 if (!intel_iommu_superpage) {
802 /* set iommu_superpage to the smallest common denominator */
804 for_each_active_iommu(iommu, drhd) {
806 mask &= cap_super_page_val(iommu->cap);
816 /* Some capabilities may be different across iommus */
817 static void domain_update_iommu_cap(struct dmar_domain *domain)
819 domain_update_iommu_coherency(domain);
820 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
821 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
824 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
825 u8 bus, u8 devfn, int alloc)
827 struct root_entry *root = &iommu->root_entry[bus];
828 struct context_entry *context;
832 if (ecs_enabled(iommu)) {
840 context = phys_to_virt(*entry & VTD_PAGE_MASK);
842 unsigned long phy_addr;
846 context = alloc_pgtable_page(iommu->node);
850 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
851 phy_addr = virt_to_phys((void *)context);
852 *entry = phy_addr | 1;
853 __iommu_flush_cache(iommu, entry, sizeof(*entry));
855 return &context[devfn];
858 static int iommu_dummy(struct device *dev)
860 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
863 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
865 struct dmar_drhd_unit *drhd = NULL;
866 struct intel_iommu *iommu;
868 struct pci_dev *ptmp, *pdev = NULL;
872 if (iommu_dummy(dev))
875 if (dev_is_pci(dev)) {
876 struct pci_dev *pf_pdev;
878 pdev = to_pci_dev(dev);
879 /* VFs aren't listed in scope tables; we need to look up
880 * the PF instead to find the IOMMU. */
881 pf_pdev = pci_physfn(pdev);
883 segment = pci_domain_nr(pdev->bus);
884 } else if (has_acpi_companion(dev))
885 dev = &ACPI_COMPANION(dev)->dev;
888 for_each_active_iommu(iommu, drhd) {
889 if (pdev && segment != drhd->segment)
892 for_each_active_dev_scope(drhd->devices,
893 drhd->devices_cnt, i, tmp) {
895 /* For a VF use its original BDF# not that of the PF
896 * which we used for the IOMMU lookup. Strictly speaking
897 * we could do this for all PCI devices; we only need to
898 * get the BDF# from the scope table for ACPI matches. */
899 if (pdev && pdev->is_virtfn)
902 *bus = drhd->devices[i].bus;
903 *devfn = drhd->devices[i].devfn;
907 if (!pdev || !dev_is_pci(tmp))
910 ptmp = to_pci_dev(tmp);
911 if (ptmp->subordinate &&
912 ptmp->subordinate->number <= pdev->bus->number &&
913 ptmp->subordinate->busn_res.end >= pdev->bus->number)
917 if (pdev && drhd->include_all) {
919 *bus = pdev->bus->number;
920 *devfn = pdev->devfn;
931 static void domain_flush_cache(struct dmar_domain *domain,
932 void *addr, int size)
934 if (!domain->iommu_coherency)
935 clflush_cache_range(addr, size);
938 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
940 struct context_entry *context;
944 spin_lock_irqsave(&iommu->lock, flags);
945 context = iommu_context_addr(iommu, bus, devfn, 0);
947 ret = context_present(context);
948 spin_unlock_irqrestore(&iommu->lock, flags);
952 static void free_context_table(struct intel_iommu *iommu)
956 struct context_entry *context;
958 spin_lock_irqsave(&iommu->lock, flags);
959 if (!iommu->root_entry) {
962 for (i = 0; i < ROOT_ENTRY_NR; i++) {
963 context = iommu_context_addr(iommu, i, 0, 0);
965 free_pgtable_page(context);
967 if (!ecs_enabled(iommu))
970 context = iommu_context_addr(iommu, i, 0x80, 0);
972 free_pgtable_page(context);
975 free_pgtable_page(iommu->root_entry);
976 iommu->root_entry = NULL;
978 spin_unlock_irqrestore(&iommu->lock, flags);
981 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
982 unsigned long pfn, int *target_level)
984 struct dma_pte *parent, *pte = NULL;
985 int level = agaw_to_level(domain->agaw);
988 BUG_ON(!domain->pgd);
990 if (!domain_pfn_supported(domain, pfn))
991 /* Address beyond IOMMU's addressing capabilities. */
994 parent = domain->pgd;
999 offset = pfn_level_offset(pfn, level);
1000 pte = &parent[offset];
1001 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1003 if (level == *target_level)
1006 if (!dma_pte_present(pte)) {
1009 tmp_page = alloc_pgtable_page(domain->nid);
1014 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1015 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1016 if (cmpxchg64(&pte->val, 0ULL, pteval))
1017 /* Someone else set it while we were thinking; use theirs. */
1018 free_pgtable_page(tmp_page);
1020 domain_flush_cache(domain, pte, sizeof(*pte));
1025 parent = phys_to_virt(dma_pte_addr(pte));
1030 *target_level = level;
1036 /* return the pte for an address at a specific level */
1037 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1039 int level, int *large_page)
1041 struct dma_pte *parent, *pte = NULL;
1042 int total = agaw_to_level(domain->agaw);
1045 parent = domain->pgd;
1046 while (level <= total) {
1047 offset = pfn_level_offset(pfn, total);
1048 pte = &parent[offset];
1052 if (!dma_pte_present(pte)) {
1053 *large_page = total;
1057 if (dma_pte_superpage(pte)) {
1058 *large_page = total;
1062 parent = phys_to_virt(dma_pte_addr(pte));
1068 /* clear last level pte; a tlb flush should follow */
1069 static void dma_pte_clear_range(struct dmar_domain *domain,
1070 unsigned long start_pfn,
1071 unsigned long last_pfn)
1073 unsigned int large_page = 1;
1074 struct dma_pte *first_pte, *pte;
1076 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1077 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1078 BUG_ON(start_pfn > last_pfn);
1080 /* we don't need lock here; nobody else touches the iova range */
1083 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1085 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1090 start_pfn += lvl_to_nr_pages(large_page);
1092 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1094 domain_flush_cache(domain, first_pte,
1095 (void *)pte - (void *)first_pte);
1097 } while (start_pfn && start_pfn <= last_pfn);
1100 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1101 int retain_level, struct dma_pte *pte,
1102 unsigned long pfn, unsigned long start_pfn,
1103 unsigned long last_pfn)
1105 pfn = max(start_pfn, pfn);
1106 pte = &pte[pfn_level_offset(pfn, level)];
1109 unsigned long level_pfn;
1110 struct dma_pte *level_pte;
1112 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1115 level_pfn = pfn & level_mask(level);
1116 level_pte = phys_to_virt(dma_pte_addr(pte));
1119 dma_pte_free_level(domain, level - 1, retain_level,
1120 level_pte, level_pfn, start_pfn,
1125 * Free the page table if we're below the level we want to
1126 * retain and the range covers the entire table.
1128 if (level < retain_level && !(start_pfn > level_pfn ||
1129 last_pfn < level_pfn + level_size(level) - 1)) {
1131 domain_flush_cache(domain, pte, sizeof(*pte));
1132 free_pgtable_page(level_pte);
1135 pfn += level_size(level);
1136 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1140 * clear last level (leaf) ptes and free page table pages below the
1141 * level we wish to keep intact.
1143 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1144 unsigned long start_pfn,
1145 unsigned long last_pfn,
1148 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1149 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1150 BUG_ON(start_pfn > last_pfn);
1152 dma_pte_clear_range(domain, start_pfn, last_pfn);
1154 /* We don't need lock here; nobody else touches the iova range */
1155 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1156 domain->pgd, 0, start_pfn, last_pfn);
1159 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1160 free_pgtable_page(domain->pgd);
1165 /* When a page at a given level is being unlinked from its parent, we don't
1166 need to *modify* it at all. All we need to do is make a list of all the
1167 pages which can be freed just as soon as we've flushed the IOTLB and we
1168 know the hardware page-walk will no longer touch them.
1169 The 'pte' argument is the *parent* PTE, pointing to the page that is to be freed. */
1171 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1172 int level, struct dma_pte *pte,
1173 struct page *freelist)
1177 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1178 pg->freelist = freelist;
1184 pte = page_address(pg);
1186 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1187 freelist = dma_pte_list_pagetables(domain, level - 1,
1190 } while (!first_pte_in_page(pte));
1195 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1196 struct dma_pte *pte, unsigned long pfn,
1197 unsigned long start_pfn,
1198 unsigned long last_pfn,
1199 struct page *freelist)
1201 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1203 pfn = max(start_pfn, pfn);
1204 pte = &pte[pfn_level_offset(pfn, level)];
1207 unsigned long level_pfn;
1209 if (!dma_pte_present(pte))
1212 level_pfn = pfn & level_mask(level);
1214 /* If range covers entire pagetable, free it */
1215 if (start_pfn <= level_pfn &&
1216 last_pfn >= level_pfn + level_size(level) - 1) {
1217 /* These subordinate page tables are going away entirely. Don't
1218 bother to clear them; we're just going to *free* them. */
1219 if (level > 1 && !dma_pte_superpage(pte))
1220 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1226 } else if (level > 1) {
1227 /* Recurse down into a level that isn't *entirely* obsolete */
1228 freelist = dma_pte_clear_level(domain, level - 1,
1229 phys_to_virt(dma_pte_addr(pte)),
1230 level_pfn, start_pfn, last_pfn,
1234 pfn += level_size(level);
1235 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1238 domain_flush_cache(domain, first_pte,
1239 (void *)++last_pte - (void *)first_pte);
1244 /* We can't just free the pages because the IOMMU may still be walking
1245 the page tables, and may have cached the intermediate levels. The
1246 pages can only be freed after the IOTLB flush has been done. */
1247 static struct page *domain_unmap(struct dmar_domain *domain,
1248 unsigned long start_pfn,
1249 unsigned long last_pfn)
1251 struct page *freelist = NULL;
1253 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1254 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1255 BUG_ON(start_pfn > last_pfn);
1257 /* we don't need lock here; nobody else touches the iova range */
1258 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1259 domain->pgd, 0, start_pfn, last_pfn, NULL);
1262 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1263 struct page *pgd_page = virt_to_page(domain->pgd);
1264 pgd_page->freelist = freelist;
1265 freelist = pgd_page;
1273 static void dma_free_pagelist(struct page *freelist)
1277 while ((pg = freelist)) {
1278 freelist = pg->freelist;
1279 free_pgtable_page(page_address(pg));
1283 static void iova_entry_free(unsigned long data)
1285 struct page *freelist = (struct page *)data;
1287 dma_free_pagelist(freelist);
1290 /* iommu handling */
1291 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1293 struct root_entry *root;
1294 unsigned long flags;
1296 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1298 pr_err("Allocating root entry for %s failed\n",
1303 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1305 spin_lock_irqsave(&iommu->lock, flags);
1306 iommu->root_entry = root;
1307 spin_unlock_irqrestore(&iommu->lock, flags);
1312 static void iommu_set_root_entry(struct intel_iommu *iommu)
1318 addr = virt_to_phys(iommu->root_entry);
1319 if (ecs_enabled(iommu))
1320 addr |= DMA_RTADDR_RTT;
1322 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1323 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1325 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1327 /* Make sure the hardware completes it */
1328 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1329 readl, (sts & DMA_GSTS_RTPS), sts);
1331 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1334 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1339 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1342 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1343 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1345 /* Make sure the hardware completes it */
1346 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1347 readl, (!(val & DMA_GSTS_WBFS)), val);
1349 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1352 /* return value determines whether we need a write buffer flush */
1353 static void __iommu_flush_context(struct intel_iommu *iommu,
1354 u16 did, u16 source_id, u8 function_mask,
1361 case DMA_CCMD_GLOBAL_INVL:
1362 val = DMA_CCMD_GLOBAL_INVL;
1364 case DMA_CCMD_DOMAIN_INVL:
1365 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1367 case DMA_CCMD_DEVICE_INVL:
1368 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1369 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1374 val |= DMA_CCMD_ICC;
1376 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1377 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1379 /* Make sure the hardware completes it */
1380 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1381 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1383 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1386 /* return value determines whether we need a write buffer flush */
1387 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1388 u64 addr, unsigned int size_order, u64 type)
1390 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1391 u64 val = 0, val_iva = 0;
1395 case DMA_TLB_GLOBAL_FLUSH:
1396 /* global flush doesn't need to set IVA_REG */
1397 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1399 case DMA_TLB_DSI_FLUSH:
1400 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1402 case DMA_TLB_PSI_FLUSH:
1403 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1404 /* IH bit is passed in as part of address */
1405 val_iva = size_order | addr;
1410 /* Note: set drain read/write */
1413 * This is probably meant to be extra secure. Looks like we can
1414 * ignore it without any impact.
1416 if (cap_read_drain(iommu->cap))
1417 val |= DMA_TLB_READ_DRAIN;
1419 if (cap_write_drain(iommu->cap))
1420 val |= DMA_TLB_WRITE_DRAIN;
1422 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1423 /* Note: Only uses first TLB reg currently */
1425 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1426 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1428 /* Make sure the hardware completes it */
1429 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1430 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1432 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1434 /* check IOTLB invalidation granularity */
1435 if (DMA_TLB_IAIG(val) == 0)
1436 pr_err("Flush IOTLB failed\n");
1437 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1438 pr_debug("TLB flush request %Lx, actual %Lx\n",
1439 (unsigned long long)DMA_TLB_IIRG(type),
1440 (unsigned long long)DMA_TLB_IAIG(val));
1443 static struct device_domain_info *
1444 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1447 struct device_domain_info *info;
1449 assert_spin_locked(&device_domain_lock);
1454 list_for_each_entry(info, &domain->devices, link)
1455 if (info->iommu == iommu && info->bus == bus &&
1456 info->devfn == devfn) {
1457 if (info->ats_supported && info->dev)
1465 static void domain_update_iotlb(struct dmar_domain *domain)
1467 struct device_domain_info *info;
1468 bool has_iotlb_device = false;
1470 assert_spin_locked(&device_domain_lock);
1472 list_for_each_entry(info, &domain->devices, link) {
1473 struct pci_dev *pdev;
1475 if (!info->dev || !dev_is_pci(info->dev))
1478 pdev = to_pci_dev(info->dev);
1479 if (pdev->ats_enabled) {
1480 has_iotlb_device = true;
1485 domain->has_iotlb_device = has_iotlb_device;
1488 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1490 struct pci_dev *pdev;
1492 assert_spin_locked(&device_domain_lock);
1494 if (!info || !dev_is_pci(info->dev))
1497 pdev = to_pci_dev(info->dev);
1499 #ifdef CONFIG_INTEL_IOMMU_SVM
1500 /* The PCIe spec, in its wisdom, declares that the behaviour of
1501 the device if you enable PASID support after ATS support is
1502 undefined. So always enable PASID support on devices which
1503 have it, even if we can't yet know if we're ever going to use it. */
1505 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1506 info->pasid_enabled = 1;
1508 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1509 info->pri_enabled = 1;
1511 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1512 info->ats_enabled = 1;
1513 domain_update_iotlb(info->domain);
1514 info->ats_qdep = pci_ats_queue_depth(pdev);
1518 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1520 struct pci_dev *pdev;
1522 assert_spin_locked(&device_domain_lock);
1524 if (!dev_is_pci(info->dev))
1527 pdev = to_pci_dev(info->dev);
1529 if (info->ats_enabled) {
1530 pci_disable_ats(pdev);
1531 info->ats_enabled = 0;
1532 domain_update_iotlb(info->domain);
1534 #ifdef CONFIG_INTEL_IOMMU_SVM
1535 if (info->pri_enabled) {
1536 pci_disable_pri(pdev);
1537 info->pri_enabled = 0;
1539 if (info->pasid_enabled) {
1540 pci_disable_pasid(pdev);
1541 info->pasid_enabled = 0;
1546 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1547 u64 addr, unsigned mask)
1550 unsigned long flags;
1551 struct device_domain_info *info;
1553 if (!domain->has_iotlb_device)
1556 spin_lock_irqsave(&device_domain_lock, flags);
1557 list_for_each_entry(info, &domain->devices, link) {
1558 if (!info->ats_enabled)
1561 sid = info->bus << 8 | info->devfn;
1562 qdep = info->ats_qdep;
1563 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1565 spin_unlock_irqrestore(&device_domain_lock, flags);
1568 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1569 struct dmar_domain *domain,
1570 unsigned long pfn, unsigned int pages,
1573 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1574 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1575 u16 did = domain->iommu_did[iommu->seq_id];
1582 * Fall back to domain-selective flush if there is no PSI support or the size is too big.
1584 * PSI requires the page size to be 2 ^ x, with the base address naturally
1585 * aligned to that size.
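 *
 * Worked example (illustrative): for pages == 3 the mask computed above is
 * ilog2(__roundup_pow_of_two(3)) == 2, i.e. a PSI request covering
 * 2^2 = 4 pages whose base address must be 16KiB aligned; if
 * cap_max_amask_val() is smaller than that mask we fall back to the
 * domain-selective flush below.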
1587 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1588 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1591 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1595 * In caching mode, changes of pages from non-present to present require
1596 * flush. However, device IOTLB doesn't need to be flushed in this case.
1598 if (!cap_caching_mode(iommu->cap) || !map)
1599 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1603 static void iommu_flush_iova(struct iova_domain *iovad)
1605 struct dmar_domain *domain;
1608 domain = container_of(iovad, struct dmar_domain, iovad);
1610 for_each_domain_iommu(idx, domain) {
1611 struct intel_iommu *iommu = g_iommus[idx];
1612 u16 did = domain->iommu_did[iommu->seq_id];
1614 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1616 if (!cap_caching_mode(iommu->cap))
1617 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1618 0, MAX_AGAW_PFN_WIDTH);
1622 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1625 unsigned long flags;
1627 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1628 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1629 pmen &= ~DMA_PMEN_EPM;
1630 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1632 /* wait for the protected region status bit to clear */
1633 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1634 readl, !(pmen & DMA_PMEN_PRS), pmen);
1636 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1639 static void iommu_enable_translation(struct intel_iommu *iommu)
1642 unsigned long flags;
1644 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1645 iommu->gcmd |= DMA_GCMD_TE;
1646 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1648 /* Make sure the hardware completes it */
1649 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1650 readl, (sts & DMA_GSTS_TES), sts);
1652 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1655 static void iommu_disable_translation(struct intel_iommu *iommu)
1660 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1661 iommu->gcmd &= ~DMA_GCMD_TE;
1662 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1664 /* Make sure the hardware completes it */
1665 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1666 readl, (!(sts & DMA_GSTS_TES)), sts);
1668 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1672 static int iommu_init_domains(struct intel_iommu *iommu)
1674 u32 ndomains, nlongs;
1677 ndomains = cap_ndoms(iommu->cap);
1678 pr_debug("%s: Number of Domains supported <%d>\n",
1679 iommu->name, ndomains);
1680 nlongs = BITS_TO_LONGS(ndomains);
1682 spin_lock_init(&iommu->lock);
1684 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1685 if (!iommu->domain_ids) {
1686 pr_err("%s: Allocating domain id array failed\n",
1691 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1692 iommu->domains = kzalloc(size, GFP_KERNEL);
1694 if (iommu->domains) {
1695 size = 256 * sizeof(struct dmar_domain *);
1696 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1699 if (!iommu->domains || !iommu->domains[0]) {
1700 pr_err("%s: Allocating domain array failed\n",
1702 kfree(iommu->domain_ids);
1703 kfree(iommu->domains);
1704 iommu->domain_ids = NULL;
1705 iommu->domains = NULL;
1712 * If Caching mode is set, then invalid translations are tagged
1713 * with domain-id 0, hence we need to pre-allocate it. We also
1714 * use domain-id 0 as a marker for non-allocated domain-id, so
1715 * make sure it is not used for a real domain.
1717 set_bit(0, iommu->domain_ids);
1722 static void disable_dmar_iommu(struct intel_iommu *iommu)
1724 struct device_domain_info *info, *tmp;
1725 unsigned long flags;
1727 if (!iommu->domains || !iommu->domain_ids)
1731 spin_lock_irqsave(&device_domain_lock, flags);
1732 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1733 struct dmar_domain *domain;
1735 if (info->iommu != iommu)
1738 if (!info->dev || !info->domain)
1741 domain = info->domain;
1743 __dmar_remove_one_dev_info(info);
1745 if (!domain_type_is_vm_or_si(domain)) {
1747 * The domain_exit() function can't be called under
1748 * device_domain_lock, as it takes this lock itself.
1749 * So release the lock here and re-run the loop later.
1752 spin_unlock_irqrestore(&device_domain_lock, flags);
1753 domain_exit(domain);
1757 spin_unlock_irqrestore(&device_domain_lock, flags);
1759 if (iommu->gcmd & DMA_GCMD_TE)
1760 iommu_disable_translation(iommu);
1763 static void free_dmar_iommu(struct intel_iommu *iommu)
1765 if ((iommu->domains) && (iommu->domain_ids)) {
1766 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1769 for (i = 0; i < elems; i++)
1770 kfree(iommu->domains[i]);
1771 kfree(iommu->domains);
1772 kfree(iommu->domain_ids);
1773 iommu->domains = NULL;
1774 iommu->domain_ids = NULL;
1777 g_iommus[iommu->seq_id] = NULL;
1779 /* free context mapping */
1780 free_context_table(iommu);
1782 #ifdef CONFIG_INTEL_IOMMU_SVM
1783 if (pasid_enabled(iommu)) {
1784 if (ecap_prs(iommu->ecap))
1785 intel_svm_finish_prq(iommu);
1786 intel_svm_free_pasid_tables(iommu);
1791 static struct dmar_domain *alloc_domain(int flags)
1793 struct dmar_domain *domain;
1795 domain = alloc_domain_mem();
1799 memset(domain, 0, sizeof(*domain));
1801 domain->flags = flags;
1802 domain->has_iotlb_device = false;
1803 INIT_LIST_HEAD(&domain->devices);
1808 /* Must be called with iommu->lock held */
1809 static int domain_attach_iommu(struct dmar_domain *domain,
1810 struct intel_iommu *iommu)
1812 unsigned long ndomains;
1815 assert_spin_locked(&device_domain_lock);
1816 assert_spin_locked(&iommu->lock);
1818 domain->iommu_refcnt[iommu->seq_id] += 1;
1819 domain->iommu_count += 1;
1820 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1821 ndomains = cap_ndoms(iommu->cap);
1822 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1824 if (num >= ndomains) {
1825 pr_err("%s: No free domain ids\n", iommu->name);
1826 domain->iommu_refcnt[iommu->seq_id] -= 1;
1827 domain->iommu_count -= 1;
1831 set_bit(num, iommu->domain_ids);
1832 set_iommu_domain(iommu, num, domain);
1834 domain->iommu_did[iommu->seq_id] = num;
1835 domain->nid = iommu->node;
1837 domain_update_iommu_cap(domain);
1843 static int domain_detach_iommu(struct dmar_domain *domain,
1844 struct intel_iommu *iommu)
1846 int num, count = INT_MAX;
1848 assert_spin_locked(&device_domain_lock);
1849 assert_spin_locked(&iommu->lock);
1851 domain->iommu_refcnt[iommu->seq_id] -= 1;
1852 count = --domain->iommu_count;
1853 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1854 num = domain->iommu_did[iommu->seq_id];
1855 clear_bit(num, iommu->domain_ids);
1856 set_iommu_domain(iommu, num, NULL);
1858 domain_update_iommu_cap(domain);
1859 domain->iommu_did[iommu->seq_id] = 0;
1865 static struct iova_domain reserved_iova_list;
1866 static struct lock_class_key reserved_rbtree_key;
1868 static int dmar_init_reserved_ranges(void)
1870 struct pci_dev *pdev = NULL;
1874 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1877 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1878 &reserved_rbtree_key);
1880 /* IOAPIC ranges shouldn't be accessed by DMA */
1881 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1882 IOVA_PFN(IOAPIC_RANGE_END));
1884 pr_err("Reserve IOAPIC range failed\n");
1888 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1889 for_each_pci_dev(pdev) {
1892 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1893 r = &pdev->resource[i];
1894 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1896 iova = reserve_iova(&reserved_iova_list,
1900 pr_err("Reserve iova failed\n");
1908 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1910 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1913 static inline int guestwidth_to_adjustwidth(int gaw)
1916 int r = (gaw - 12) % 9;
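/*
 * Worked example (illustrative, assuming the elided remainder of the
 * function rounds gaw up when r != 0, as in the mainline driver): the
 * adjusted width must satisfy (width - 12) % 9 == 0, i.e. be one of
 * 30, 39, 48, 57, ... bits. A guest width of 48 is kept as-is (r == 0),
 * while a guest width of 40 (r == 1) is rounded up to 48.
 */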
1927 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1930 int adjust_width, agaw;
1931 unsigned long sagaw;
1934 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1937 err = init_iova_flush_queue(&domain->iovad,
1938 iommu_flush_iova, iova_entry_free);
1942 domain_reserve_special_ranges(domain);
1944 /* calculate AGAW */
1945 if (guest_width > cap_mgaw(iommu->cap))
1946 guest_width = cap_mgaw(iommu->cap);
1947 domain->gaw = guest_width;
1948 adjust_width = guestwidth_to_adjustwidth(guest_width);
1949 agaw = width_to_agaw(adjust_width);
1950 sagaw = cap_sagaw(iommu->cap);
1951 if (!test_bit(agaw, &sagaw)) {
1952 /* hardware doesn't support it, choose a bigger one */
1953 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1954 agaw = find_next_bit(&sagaw, 5, agaw);
1958 domain->agaw = agaw;
1960 if (ecap_coherent(iommu->ecap))
1961 domain->iommu_coherency = 1;
1963 domain->iommu_coherency = 0;
1965 if (ecap_sc_support(iommu->ecap))
1966 domain->iommu_snooping = 1;
1968 domain->iommu_snooping = 0;
1970 if (intel_iommu_superpage)
1971 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1973 domain->iommu_superpage = 0;
1975 domain->nid = iommu->node;
1977 /* always allocate the top pgd */
1978 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1981 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1985 static void domain_exit(struct dmar_domain *domain)
1987 struct page *freelist = NULL;
1989 /* Domain 0 is reserved, so don't process it */
1993 /* Remove associated devices and clear attached or cached domains */
1995 domain_remove_dev_info(domain);
1999 put_iova_domain(&domain->iovad);
2001 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
2003 dma_free_pagelist(freelist);
2005 free_domain_mem(domain);
2008 static int domain_context_mapping_one(struct dmar_domain *domain,
2009 struct intel_iommu *iommu,
2012 u16 did = domain->iommu_did[iommu->seq_id];
2013 int translation = CONTEXT_TT_MULTI_LEVEL;
2014 struct device_domain_info *info = NULL;
2015 struct context_entry *context;
2016 unsigned long flags;
2017 struct dma_pte *pgd;
2022 if (hw_pass_through && domain_type_is_si(domain))
2023 translation = CONTEXT_TT_PASS_THROUGH;
2025 pr_debug("Set context mapping for %02x:%02x.%d\n",
2026 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2028 BUG_ON(!domain->pgd);
2030 spin_lock_irqsave(&device_domain_lock, flags);
2031 spin_lock(&iommu->lock);
2034 context = iommu_context_addr(iommu, bus, devfn, 1);
2039 if (context_present(context))
2043 * For kdump cases, old valid entries may be cached due to the
2044 * in-flight DMA and copied pgtable, but there is no unmapping
2045 * behaviour for them, thus we need an explicit cache flush for
2046 * the newly-mapped device. For kdump, at this point, the device
2047 * is supposed to have finished reset at its driver probe stage, so no
2048 * in-flight DMA will exist, and we don't need to worry about it any more.
2051 if (context_copied(context)) {
2052 u16 did_old = context_domain_id(context);
2054 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
2055 iommu->flush.flush_context(iommu, did_old,
2056 (((u16)bus) << 8) | devfn,
2057 DMA_CCMD_MASK_NOBIT,
2058 DMA_CCMD_DEVICE_INVL);
2059 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2066 context_clear_entry(context);
2067 context_set_domain_id(context, did);
2070 * Skip top levels of page tables for an iommu which has a smaller agaw
2071 * than the default. Unnecessary for PT mode.
2073 if (translation != CONTEXT_TT_PASS_THROUGH) {
2074 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
2076 pgd = phys_to_virt(dma_pte_addr(pgd));
2077 if (!dma_pte_present(pgd))
2081 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2082 if (info && info->ats_supported)
2083 translation = CONTEXT_TT_DEV_IOTLB;
2085 translation = CONTEXT_TT_MULTI_LEVEL;
2087 context_set_address_root(context, virt_to_phys(pgd));
2088 context_set_address_width(context, iommu->agaw);
2091 * In pass through mode, AW must be programmed to
2092 * indicate the largest AGAW value supported by
2093 * hardware. And ASR is ignored by hardware.
2095 context_set_address_width(context, iommu->msagaw);
2098 context_set_translation_type(context, translation);
2099 context_set_fault_enable(context);
2100 context_set_present(context);
2101 domain_flush_cache(domain, context, sizeof(*context));
2104 * It's a non-present to present mapping. If hardware doesn't cache
2105 * non-present entries we only need to flush the write-buffer. If it
2106 * _does_ cache non-present entries, then it does so in the special
2107 * domain #0, which we have to flush:
2109 if (cap_caching_mode(iommu->cap)) {
2110 iommu->flush.flush_context(iommu, 0,
2111 (((u16)bus) << 8) | devfn,
2112 DMA_CCMD_MASK_NOBIT,
2113 DMA_CCMD_DEVICE_INVL);
2114 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2116 iommu_flush_write_buffer(iommu);
2118 iommu_enable_dev_iotlb(info);
2123 spin_unlock(&iommu->lock);
2124 spin_unlock_irqrestore(&device_domain_lock, flags);
2129 struct domain_context_mapping_data {
2130 struct dmar_domain *domain;
2131 struct intel_iommu *iommu;
2134 static int domain_context_mapping_cb(struct pci_dev *pdev,
2135 u16 alias, void *opaque)
2137 struct domain_context_mapping_data *data = opaque;
2139 return domain_context_mapping_one(data->domain, data->iommu,
2140 PCI_BUS_NUM(alias), alias & 0xff);
2144 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2146 struct intel_iommu *iommu;
2148 struct domain_context_mapping_data data;
2150 iommu = device_to_iommu(dev, &bus, &devfn);
2154 if (!dev_is_pci(dev))
2155 return domain_context_mapping_one(domain, iommu, bus, devfn);
2157 data.domain = domain;
2160 return pci_for_each_dma_alias(to_pci_dev(dev),
2161 &domain_context_mapping_cb, &data);
2164 static int domain_context_mapped_cb(struct pci_dev *pdev,
2165 u16 alias, void *opaque)
2167 struct intel_iommu *iommu = opaque;
2169 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2172 static int domain_context_mapped(struct device *dev)
2174 struct intel_iommu *iommu;
2177 iommu = device_to_iommu(dev, &bus, &devfn);
2181 if (!dev_is_pci(dev))
2182 return device_context_mapped(iommu, bus, devfn);
2184 return !pci_for_each_dma_alias(to_pci_dev(dev),
2185 domain_context_mapped_cb, iommu);
2188 /* Returns a number of VTD pages, but aligned to MM page size */
2189 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2192 host_addr &= ~PAGE_MASK;
2193 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
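/*
 * Worked example (illustrative): with 4KiB MM pages, host_addr == 0x1234
 * and size == 0x2000 leave an offset of 0x234 within the first page, so
 * PAGE_ALIGN(0x234 + 0x2000) == 0x3000 and the function returns 3 VT-d
 * pages; the mapping is always rounded out to whole MM pages.
 */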
2196 /* Return largest possible superpage level for a given mapping */
2197 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2198 unsigned long iov_pfn,
2199 unsigned long phy_pfn,
2200 unsigned long pages)
2202 int support, level = 1;
2203 unsigned long pfnmerge;
2205 support = domain->iommu_superpage;
2207 /* To use a large page, the virtual *and* physical addresses
2208 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2209 of them will mean we have to use smaller pages. So just
2210 merge them and check both at once. */
2211 pfnmerge = iov_pfn | phy_pfn;
2213 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2214 pages >>= VTD_STRIDE_SHIFT;
2217 pfnmerge >>= VTD_STRIDE_SHIFT;
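/*
 * Worked example (illustrative): if the hardware reports 2MiB and 1GiB
 * superpage support, iov_pfn and phy_pfn are both multiples of 512 and at
 * least 512 pages are being mapped, the loop above settles on level 2, so
 * __domain_mapping() below can use a single 2MiB PTE for that chunk. Any
 * misaligned low bits in either pfn force level 1, i.e. plain 4KiB pages.
 */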
2224 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2225 struct scatterlist *sg, unsigned long phys_pfn,
2226 unsigned long nr_pages, int prot)
2228 struct dma_pte *first_pte = NULL, *pte = NULL;
2229 phys_addr_t uninitialized_var(pteval);
2230 unsigned long sg_res = 0;
2231 unsigned int largepage_lvl = 0;
2232 unsigned long lvl_pages = 0;
2234 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2236 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2239 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2243 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2246 while (nr_pages > 0) {
2250 sg_res = aligned_nrpages(sg->offset, sg->length);
2251 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2252 sg->dma_length = sg->length;
2253 pteval = page_to_phys(sg_page(sg)) | prot;
2254 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2258 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2260 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2263 /* It is a large page */
2264 if (largepage_lvl > 1) {
2265 unsigned long nr_superpages, end_pfn;
2267 pteval |= DMA_PTE_LARGE_PAGE;
2268 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2270 nr_superpages = sg_res / lvl_pages;
2271 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2274 * Ensure that old small page tables are
2275 * removed to make room for superpage(s).
2276 * We're adding new large pages, so make sure
2277 * we don't remove their parent tables.
2279 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2282 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2286 /* We don't need lock here, nobody else
2287 * touches the iova range
2289 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2291 static int dumps = 5;
2292 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2293 iov_pfn, tmp, (unsigned long long)pteval);
2296 debug_dma_dump_mappings(NULL);
2301 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2303 BUG_ON(nr_pages < lvl_pages);
2304 BUG_ON(sg_res < lvl_pages);
2306 nr_pages -= lvl_pages;
2307 iov_pfn += lvl_pages;
2308 phys_pfn += lvl_pages;
2309 pteval += lvl_pages * VTD_PAGE_SIZE;
2310 sg_res -= lvl_pages;
2312 /* If the next PTE would be the first in a new page, then we
2313 need to flush the cache on the entries we've just written.
2314 And then we'll need to recalculate 'pte', so clear it and
2315 let it get set again in the if (!pte) block above.
2317 If we're done (!nr_pages) we need to flush the cache too.
2319 Also if we've been setting superpages, we may need to
2320 recalculate 'pte' and switch back to smaller pages for the
2321 end of the mapping, if the trailing size is not enough to
2322 use another superpage (i.e. sg_res < lvl_pages). */
2324 if (!nr_pages || first_pte_in_page(pte) ||
2325 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2326 domain_flush_cache(domain, first_pte,
2327 (void *)pte - (void *)first_pte);
2331 if (!sg_res && nr_pages)
2337 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2338 struct scatterlist *sg, unsigned long nr_pages,
2341 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2344 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2345 unsigned long phys_pfn, unsigned long nr_pages,
2348 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2351 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2353 unsigned long flags;
2354 struct context_entry *context;
2360 spin_lock_irqsave(&iommu->lock, flags);
2361 context = iommu_context_addr(iommu, bus, devfn, 0);
2363 spin_unlock_irqrestore(&iommu->lock, flags);
2366 did_old = context_domain_id(context);
2367 context_clear_entry(context);
2368 __iommu_flush_cache(iommu, context, sizeof(*context));
2369 spin_unlock_irqrestore(&iommu->lock, flags);
2370 iommu->flush.flush_context(iommu,
2372 (((u16)bus) << 8) | devfn,
2373 DMA_CCMD_MASK_NOBIT,
2374 DMA_CCMD_DEVICE_INVL);
2375 iommu->flush.flush_iotlb(iommu,
2382 static inline void unlink_domain_info(struct device_domain_info *info)
2384 assert_spin_locked(&device_domain_lock);
2385 list_del(&info->link);
2386 list_del(&info->global);
2388 info->dev->archdata.iommu = NULL;
2391 static void domain_remove_dev_info(struct dmar_domain *domain)
2393 struct device_domain_info *info, *tmp;
2394 unsigned long flags;
2396 spin_lock_irqsave(&device_domain_lock, flags);
2397 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2398 __dmar_remove_one_dev_info(info);
2399 spin_unlock_irqrestore(&device_domain_lock, flags);
2404 * Note: struct device->archdata.iommu is used to store the info
2406 static struct dmar_domain *find_domain(struct device *dev)
2408 struct device_domain_info *info;
2410 /* No lock here, assumes no domain exit in normal case */
2411 info = dev->archdata.iommu;
2413 return info->domain;
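/*
 * The lookup below walks the global device_domain_list without taking a
 * lock; callers are expected to already hold device_domain_lock, as the
 * callers later in this file do.
 */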
2417 static inline struct device_domain_info *
2418 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2420 struct device_domain_info *info;
2422 list_for_each_entry(info, &device_domain_list, global)
2423 if (info->iommu->segment == segment && info->bus == bus &&
2424 info->devfn == devfn)
2430 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2433 struct dmar_domain *domain)
2435 struct dmar_domain *found = NULL;
2436 struct device_domain_info *info;
2437 unsigned long flags;
2440 info = alloc_devinfo_mem();
2445 info->devfn = devfn;
2446 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2447 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2450 info->domain = domain;
2451 info->iommu = iommu;
2453 if (dev && dev_is_pci(dev)) {
2454 struct pci_dev *pdev = to_pci_dev(info->dev);
2456 if (ecap_dev_iotlb_support(iommu->ecap) &&
2457 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2458 dmar_find_matched_atsr_unit(pdev))
2459 info->ats_supported = 1;
2461 if (ecs_enabled(iommu)) {
2462 if (pasid_enabled(iommu)) {
2463 int features = pci_pasid_features(pdev);
2465 info->pasid_supported = features | 1;
2468 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2469 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2470 info->pri_supported = 1;
2474 spin_lock_irqsave(&device_domain_lock, flags);
2476 found = find_domain(dev);
2479 struct device_domain_info *info2;
2480 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2482 found = info2->domain;
2488 spin_unlock_irqrestore(&device_domain_lock, flags);
2489 free_devinfo_mem(info);
2490 /* Caller must free the original domain */
2494 spin_lock(&iommu->lock);
2495 ret = domain_attach_iommu(domain, iommu);
2496 spin_unlock(&iommu->lock);
2499 spin_unlock_irqrestore(&device_domain_lock, flags);
2500 free_devinfo_mem(info);
2504 list_add(&info->link, &domain->devices);
2505 list_add(&info->global, &device_domain_list);
2507 dev->archdata.iommu = info;
2508 spin_unlock_irqrestore(&device_domain_lock, flags);
2510 if (dev && domain_context_mapping(domain, dev)) {
2511 pr_err("Domain context map for %s failed\n", dev_name(dev));
2512 dmar_remove_one_dev_info(domain, dev);
2519 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2521 *(u16 *)opaque = alias;
2525 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2527 struct device_domain_info *info = NULL;
2528 struct dmar_domain *domain = NULL;
2529 struct intel_iommu *iommu;
2530 u16 req_id, dma_alias;
2531 unsigned long flags;
2534 iommu = device_to_iommu(dev, &bus, &devfn);
2538 req_id = ((u16)bus << 8) | devfn;
2540 if (dev_is_pci(dev)) {
2541 struct pci_dev *pdev = to_pci_dev(dev);
2543 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2545 spin_lock_irqsave(&device_domain_lock, flags);
2546 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2547 PCI_BUS_NUM(dma_alias),
2550 iommu = info->iommu;
2551 domain = info->domain;
2553 spin_unlock_irqrestore(&device_domain_lock, flags);
2555 /* DMA alias already has a domain, use it */
2560 /* Allocate and initialize new domain for the device */
2561 domain = alloc_domain(0);
2564 if (domain_init(domain, iommu, gaw)) {
2565 domain_exit(domain);
2574 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2575 struct dmar_domain *domain)
2577 struct intel_iommu *iommu;
2578 struct dmar_domain *tmp;
2579 u16 req_id, dma_alias;
2582 iommu = device_to_iommu(dev, &bus, &devfn);
2586 req_id = ((u16)bus << 8) | devfn;
2588 if (dev_is_pci(dev)) {
2589 struct pci_dev *pdev = to_pci_dev(dev);
2591 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2593 /* register PCI DMA alias device */
2594 if (req_id != dma_alias) {
2595 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2596 dma_alias & 0xff, NULL, domain);
2598 if (!tmp || tmp != domain)
2603 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2604 if (!tmp || tmp != domain)
2610 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2612 struct dmar_domain *domain, *tmp;
2614 domain = find_domain(dev);
2618 domain = find_or_alloc_domain(dev, gaw);
2622 tmp = set_domain_for_dev(dev, domain);
2623 if (!tmp || domain != tmp) {
2624 domain_exit(domain);
2633 static int iommu_domain_identity_map(struct dmar_domain *domain,
2634 unsigned long long start,
2635 unsigned long long end)
2637 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2638 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2640 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2641 dma_to_mm_pfn(last_vpfn))) {
2642 pr_err("Reserving iova failed\n");
2646 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2648 * RMRR range might have overlap with physical memory range,
2651 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2653 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2654 last_vpfn - first_vpfn + 1,
2655 DMA_PTE_READ|DMA_PTE_WRITE);
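/*
 * Note (illustrative): for an identity map the IOVA page frame equals the
 * physical page frame, which is why first_vpfn is passed as both the iov_pfn
 * and the phys_pfn above.  E.g. an RMRR covering 0xbf800000-0xbf81ffff gives
 * first_vpfn = 0xbf800 and last_vpfn = 0xbf81f, i.e. 0x20 pages mapped 1:1.
 */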
2658 static int domain_prepare_identity_map(struct device *dev,
2659 struct dmar_domain *domain,
2660 unsigned long long start,
2661 unsigned long long end)
2663 /* For _hardware_ passthrough, don't bother. But for software
2664 passthrough, we do it anyway -- it may indicate a memory
2665 range which is reserved in E820 and so didn't get set
2666 up to start with in si_domain */
2667 if (domain == si_domain && hw_pass_through) {
2668 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2669 dev_name(dev), start, end);
2673 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2674 dev_name(dev), start, end);
2677 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2678 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2679 dmi_get_system_info(DMI_BIOS_VENDOR),
2680 dmi_get_system_info(DMI_BIOS_VERSION),
2681 dmi_get_system_info(DMI_PRODUCT_VERSION));
2685 if (end >> agaw_to_width(domain->agaw)) {
2686 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2687 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2688 agaw_to_width(domain->agaw),
2689 dmi_get_system_info(DMI_BIOS_VENDOR),
2690 dmi_get_system_info(DMI_BIOS_VERSION),
2691 dmi_get_system_info(DMI_PRODUCT_VERSION));
2695 return iommu_domain_identity_map(domain, start, end);
2698 static int iommu_prepare_identity_map(struct device *dev,
2699 unsigned long long start,
2700 unsigned long long end)
2702 struct dmar_domain *domain;
2705 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2709 ret = domain_prepare_identity_map(dev, domain, start, end);
2711 domain_exit(domain);
2716 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2719 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2721 return iommu_prepare_identity_map(dev, rmrr->base_address,
2725 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2726 static inline void iommu_prepare_isa(void)
2728 struct pci_dev *pdev;
2731 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2735 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2736 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2739 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2744 static inline void iommu_prepare_isa(void)
2748 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2750 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2752 static int __init si_domain_init(int hw)
2756 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2760 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2761 domain_exit(si_domain);
2765 pr_debug("Identity mapping domain allocated\n");
2770 for_each_online_node(nid) {
2771 unsigned long start_pfn, end_pfn;
2774 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2775 ret = iommu_domain_identity_map(si_domain,
2776 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2785 static int identity_mapping(struct device *dev)
2787 struct device_domain_info *info;
2789 if (likely(!iommu_identity_mapping))
2792 info = dev->archdata.iommu;
2793 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2794 return (info->domain == si_domain);
2799 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2801 struct dmar_domain *ndomain;
2802 struct intel_iommu *iommu;
2805 iommu = device_to_iommu(dev, &bus, &devfn);
2809 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2810 if (ndomain != domain)
2816 static bool device_has_rmrr(struct device *dev)
2818 struct dmar_rmrr_unit *rmrr;
2823 for_each_rmrr_units(rmrr) {
2825 * Return TRUE if this RMRR contains the device that
2828 for_each_active_dev_scope(rmrr->devices,
2829 rmrr->devices_cnt, i, tmp)
2840 * There are a couple cases where we need to restrict the functionality of
2841 * devices associated with RMRRs. The first is when evaluating a device for
2842 * identity mapping because problems exist when devices are moved in and out
2843 * of domains and their respective RMRR information is lost. This means that
2844 * a device with associated RMRRs will never be in a "passthrough" domain.
2845 * The second is use of the device through the IOMMU API. This interface
2846 * expects to have full control of the IOVA space for the device. We cannot
2847 * satisfy both the requirement that RMRR access is maintained and have an
2848 * unencumbered IOVA space. We also have no ability to quiesce the device's
2849 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2850 * We therefore prevent devices associated with an RMRR from participating in
2851 * the IOMMU API, which eliminates them from device assignment.
2853 * In both cases we assume that PCI USB devices with RMRRs have them largely
2854 * for historical reasons and that the RMRR space is not actively used post
2855 * boot. This exclusion may change if vendors begin to abuse it.
2857 * The same exception is made for graphics devices, with the requirement that
2858 * any use of the RMRR regions will be torn down before assigning the device
2861 static bool device_is_rmrr_locked(struct device *dev)
2863 if (!device_has_rmrr(dev))
2866 if (dev_is_pci(dev)) {
2867 struct pci_dev *pdev = to_pci_dev(dev);
2869 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2876 static int iommu_should_identity_map(struct device *dev, int startup)
2879 if (dev_is_pci(dev)) {
2880 struct pci_dev *pdev = to_pci_dev(dev);
2882 if (device_is_rmrr_locked(dev))
2885 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2888 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2891 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2895 * We want to start off with all devices in the 1:1 domain, and
2896 * take them out later if we find they can't access all of memory.
2898 * However, we can't do this for PCI devices behind bridges,
2899 * because all PCI devices behind the same bridge will end up
2900 * with the same source-id on their transactions.
2902 * Practically speaking, we can't change things around for these
2903 * devices at run-time, because we can't be sure there'll be no
2904 * DMA transactions in flight for any of their siblings.
2906 * So PCI devices (unless they're on the root bus) as well as
2907 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2908 * the 1:1 domain, just in _case_ one of their siblings turns out
2909 * not to be able to map all of memory.
2911 if (!pci_is_pcie(pdev)) {
2912 if (!pci_is_root_bus(pdev->bus))
2914 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2916 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2919 if (device_has_rmrr(dev))
2924 * At boot time, we don't yet know if devices will be 64-bit capable.
2925 * Assume that they will — if they turn out not to be, then we can
2926 * take them out of the 1:1 domain later.
2930 * If the device's dma_mask is less than the system's memory
2931 * size then this is not a candidate for identity mapping.
2933 u64 dma_mask = *dev->dma_mask;
2935 if (dev->coherent_dma_mask &&
2936 dev->coherent_dma_mask < dma_mask)
2937 dma_mask = dev->coherent_dma_mask;
2939 return dma_mask >= dma_get_required_mask(dev);
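/*
 * Illustrative example: on a machine with 8GiB of RAM dma_get_required_mask()
 * typically reports a mask of at least 33 bits, so a device whose dma_mask
 * (and coherent_dma_mask) is only DMA_BIT_MASK(32) fails the check above and
 * is kept out of the 1:1 domain, while a 64-bit capable device passes and
 * stays identity mapped.
 */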
2945 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2949 if (!iommu_should_identity_map(dev, 1))
2952 ret = domain_add_dev_info(si_domain, dev);
2954 pr_info("%s identity mapping for device %s\n",
2955 hw ? "Hardware" : "Software", dev_name(dev));
2956 else if (ret == -ENODEV)
2957 /* device not associated with an iommu */
2964 static int __init iommu_prepare_static_identity_mapping(int hw)
2966 struct pci_dev *pdev = NULL;
2967 struct dmar_drhd_unit *drhd;
2968 struct intel_iommu *iommu;
2973 for_each_pci_dev(pdev) {
2974 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2979 for_each_active_iommu(iommu, drhd)
2980 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2981 struct acpi_device_physical_node *pn;
2982 struct acpi_device *adev;
2984 if (dev->bus != &acpi_bus_type)
2987 adev = to_acpi_device(dev);
2988 mutex_lock(&adev->physical_node_lock);
2989 list_for_each_entry(pn, &adev->physical_node_list, node) {
2990 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2994 mutex_unlock(&adev->physical_node_lock);
3002 static void intel_iommu_init_qi(struct intel_iommu *iommu)
3005 * Start from a sane iommu hardware state.
3006 * If queued invalidation was already initialized by us
3007 * (for example, while enabling interrupt remapping) then
3008 * things are already rolling from a sane state.
3012 * Clear any previous faults.
3014 dmar_fault(-1, iommu);
3016 * Disable queued invalidation if supported and already enabled
3017 * before OS handover.
3019 dmar_disable_qi(iommu);
3022 if (dmar_enable_qi(iommu)) {
3024 * Queued Invalidate not enabled, use Register Based Invalidate
3026 iommu->flush.flush_context = __iommu_flush_context;
3027 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3028 pr_info("%s: Using Register based invalidation\n",
3031 iommu->flush.flush_context = qi_flush_context;
3032 iommu->flush.flush_iotlb = qi_flush_iotlb;
3033 pr_info("%s: Using Queued invalidation\n", iommu->name);
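/*
 * copy_context_table - helper for the kdump path below.  It copies the
 * context entries for all 256 devfns of one bus from the old kernel's tables
 * into freshly allocated pages.  With extended context support ("ext") each
 * bus owns two context tables, hence tbl_idx = bus * 2, and a devfn is split
 * as idx = (devfn * 2) % 256 with pos selecting the lower or upper table;
 * e.g. devfn 0x83 lands at idx 0x06 of the upper table (illustrative).
 */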
3037 static int copy_context_table(struct intel_iommu *iommu,
3038 struct root_entry *old_re,
3039 struct context_entry **tbl,
3042 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3043 struct context_entry *new_ce = NULL, ce;
3044 struct context_entry *old_ce = NULL;
3045 struct root_entry re;
3046 phys_addr_t old_ce_phys;
3048 tbl_idx = ext ? bus * 2 : bus;
3049 memcpy(&re, old_re, sizeof(re));
3051 for (devfn = 0; devfn < 256; devfn++) {
3052 /* First calculate the correct index */
3053 idx = (ext ? devfn * 2 : devfn) % 256;
3056 /* First save what we may have and clean up */
3058 tbl[tbl_idx] = new_ce;
3059 __iommu_flush_cache(iommu, new_ce,
3069 old_ce_phys = root_entry_lctp(&re);
3071 old_ce_phys = root_entry_uctp(&re);
3074 if (ext && devfn == 0) {
3075 /* No LCTP, try UCTP */
3084 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3089 new_ce = alloc_pgtable_page(iommu->node);
3096 /* Now copy the context entry */
3097 memcpy(&ce, old_ce + idx, sizeof(ce));
3099 if (!__context_present(&ce))
3102 did = context_domain_id(&ce);
3103 if (did >= 0 && did < cap_ndoms(iommu->cap))
3104 set_bit(did, iommu->domain_ids);
3107 * We need a marker for copied context entries. This
3108 * marker needs to work for the old format as well as
3109 * for extended context entries.
3111 * Bit 67 of the context entry is used. In the old
3112 * format this bit is available to software, in the
3113 * extended format it is the PGE bit, but PGE is ignored
3114 * by HW if PASIDs are disabled (and thus still
3117 * So disable PASIDs first and then mark the entry
3118 * copied. This means that we don't copy PASID
3119 * translations from the old kernel, but this is fine as
3120 * faults there are not fatal.
3122 context_clear_pasid_enable(&ce);
3123 context_set_copied(&ce);
3128 tbl[tbl_idx + pos] = new_ce;
3130 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3139 static int copy_translation_tables(struct intel_iommu *iommu)
3141 struct context_entry **ctxt_tbls;
3142 struct root_entry *old_rt;
3143 phys_addr_t old_rt_phys;
3144 int ctxt_table_entries;
3145 unsigned long flags;
3150 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3151 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3152 new_ext = !!ecap_ecs(iommu->ecap);
3155 * The RTT bit can only be changed when translation is disabled,
3156 * but disabling translation would open a window for data
3157 * corruption. So bail out and don't copy anything if we would
3158 * have to change the bit.
3163 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3167 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3171 /* This is too big for the stack - allocate it from slab */
3172 ctxt_table_entries = ext ? 512 : 256;
3174 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3178 for (bus = 0; bus < 256; bus++) {
3179 ret = copy_context_table(iommu, &old_rt[bus],
3180 ctxt_tbls, bus, ext);
3182 pr_err("%s: Failed to copy context table for bus %d\n",
3188 spin_lock_irqsave(&iommu->lock, flags);
3190 /* Context tables are copied, now write them to the root_entry table */
3191 for (bus = 0; bus < 256; bus++) {
3192 int idx = ext ? bus * 2 : bus;
3195 if (ctxt_tbls[idx]) {
3196 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3197 iommu->root_entry[bus].lo = val;
3200 if (!ext || !ctxt_tbls[idx + 1])
3203 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3204 iommu->root_entry[bus].hi = val;
3207 spin_unlock_irqrestore(&iommu->lock, flags);
3211 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3221 static int __init init_dmars(void)
3223 struct dmar_drhd_unit *drhd;
3224 struct dmar_rmrr_unit *rmrr;
3225 bool copied_tables = false;
3227 struct intel_iommu *iommu;
3233 * initialize and program root entry to not present
3236 for_each_drhd_unit(drhd) {
3238 * lock not needed as this is only incremented in the single-
3239 * threaded kernel __init code path; all other accesses are reads
3242 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3246 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3249 /* Preallocate enough resources for IOMMU hot-addition */
3250 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3251 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3253 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3256 pr_err("Allocating global iommu array failed\n");
3261 for_each_active_iommu(iommu, drhd) {
3262 g_iommus[iommu->seq_id] = iommu;
3264 intel_iommu_init_qi(iommu);
3266 ret = iommu_init_domains(iommu);
3270 init_translation_status(iommu);
3272 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3273 iommu_disable_translation(iommu);
3274 clear_translation_pre_enabled(iommu);
3275 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3281 * we could share the same root & context tables
3282 * among all IOMMUs; need to split it later.
3284 ret = iommu_alloc_root_entry(iommu);
3288 if (translation_pre_enabled(iommu)) {
3289 pr_info("Translation already enabled - trying to copy translation structures\n");
3291 ret = copy_translation_tables(iommu);
3294 * We found the IOMMU with translation
3295 * enabled - but failed to copy over the
3296 * old root-entry table. Try to proceed
3297 * by disabling translation now and
3298 * allocating a clean root-entry table.
3299 * This might cause DMAR faults, but
3300 * probably the dump will still succeed.
3302 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3304 iommu_disable_translation(iommu);
3305 clear_translation_pre_enabled(iommu);
3307 pr_info("Copied translation tables from previous kernel for %s\n",
3309 copied_tables = true;
3313 if (!ecap_pass_through(iommu->ecap))
3314 hw_pass_through = 0;
3315 #ifdef CONFIG_INTEL_IOMMU_SVM
3316 if (pasid_enabled(iommu))
3317 intel_svm_alloc_pasid_tables(iommu);
3322 * Now that qi is enabled on all iommus, set the root entry and flush
3323 * caches. This is required on some Intel X58 chipsets, otherwise the
3324 * flush_context function will loop forever and the boot hangs.
3326 for_each_active_iommu(iommu, drhd) {
3327 iommu_flush_write_buffer(iommu);
3328 iommu_set_root_entry(iommu);
3329 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3330 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3333 if (iommu_pass_through)
3334 iommu_identity_mapping |= IDENTMAP_ALL;
3336 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3337 iommu_identity_mapping |= IDENTMAP_GFX;
3340 check_tylersburg_isoch();
3342 if (iommu_identity_mapping) {
3343 ret = si_domain_init(hw_pass_through);
3350 * If we copied translations from a previous kernel in the kdump
3351 * case, we cannot assign the devices to domains now, as that
3352 * would eliminate the old mappings. So skip this part and defer
3353 * the assignment to device driver initialization time.
3359 * If pass through is not set or not enabled, set up context entries for
3360 * identity mappings for rmrr, gfx, and isa, and possibly fall back to
3361 * static identity mapping if iommu_identity_mapping is set.
3363 if (iommu_identity_mapping) {
3364 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3366 pr_crit("Failed to setup IOMMU pass-through\n");
3372 * for each dev attached to rmrr
3374 * locate drhd for dev, alloc domain for dev
3375 * allocate free domain
3376 * allocate page table entries for rmrr
3377 * if context not allocated for bus
3378 * allocate and init context
3379 * set present in root table for this bus
3380 * init context with domain, translation etc
3384 pr_info("Setting RMRR:\n");
3385 for_each_rmrr_units(rmrr) {
3386 /* Some BIOSes list non-existent devices in the DMAR table. */
3387 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3389 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3391 pr_err("Mapping reserved region failed\n");
3395 iommu_prepare_isa();
3402 * global invalidate context cache
3403 * global invalidate iotlb
3404 * enable translation
3406 for_each_iommu(iommu, drhd) {
3407 if (drhd->ignored) {
3409 * we always have to disable PMRs or DMA may fail on
3413 iommu_disable_protect_mem_regions(iommu);
3417 iommu_flush_write_buffer(iommu);
3419 #ifdef CONFIG_INTEL_IOMMU_SVM
3420 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3421 ret = intel_svm_enable_prq(iommu);
3426 ret = dmar_set_interrupt(iommu);
3430 if (!translation_pre_enabled(iommu))
3431 iommu_enable_translation(iommu);
3433 iommu_disable_protect_mem_regions(iommu);
3439 for_each_active_iommu(iommu, drhd) {
3440 disable_dmar_iommu(iommu);
3441 free_dmar_iommu(iommu);
3450 /* This takes a number of _MM_ pages, not VTD pages */
3451 static unsigned long intel_alloc_iova(struct device *dev,
3452 struct dmar_domain *domain,
3453 unsigned long nrpages, uint64_t dma_mask)
3455 unsigned long iova_pfn = 0;
3457 /* Restrict dma_mask to the width that the iommu can handle */
3458 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3459 /* Ensure we reserve the whole size-aligned region */
3460 nrpages = __roundup_pow_of_two(nrpages);
3462 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3464 * First try to allocate an io virtual address in
3465 * DMA_BIT_MASK(32) and if that fails then try allocating
3468 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3469 IOVA_PFN(DMA_BIT_MASK(32)));
3473 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
3474 if (unlikely(!iova_pfn)) {
3475 pr_err("Allocating %ld-page iova for %s failed",
3476 nrpages, dev_name(dev));
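/*
 * Illustrative example: a request for 5 pages from a device with a 48-bit
 * dma_mask is first rounded up to 8 pages (a power of two, so the whole
 * size-aligned region gets reserved) and then tried below 4GiB; only if that
 * 32-bit attempt fails is the allocation retried against the full 48-bit
 * limit.  Booting with dmar_forcedac set skips the 32-bit attempt entirely.
 */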
3483 static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3485 struct dmar_domain *domain, *tmp;
3486 struct dmar_rmrr_unit *rmrr;
3487 struct device *i_dev;
3490 domain = find_domain(dev);
3494 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3498 /* We have a new domain - setup possible RMRRs for the device */
3500 for_each_rmrr_units(rmrr) {
3501 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3506 ret = domain_prepare_identity_map(dev, domain,
3510 dev_err(dev, "Mapping reserved region failed\n");
3515 tmp = set_domain_for_dev(dev, domain);
3516 if (!tmp || domain != tmp) {
3517 domain_exit(domain);
3524 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3530 /* Check if the dev needs to go through the non-identity map and unmap process. */
3531 static int iommu_no_mapping(struct device *dev)
3535 if (iommu_dummy(dev))
3538 if (!iommu_identity_mapping)
3541 found = identity_mapping(dev);
3543 if (iommu_should_identity_map(dev, 0))
3547 * The 32-bit DMA device is removed from si_domain; fall back
3548 * to non-identity mapping.
3550 dmar_remove_one_dev_info(si_domain, dev);
3551 pr_info("32bit %s uses non-identity mapping\n",
3557 * If a 64-bit DMA device was detached from a VM, the device
3558 * is put back into si_domain for identity mapping.
3560 if (iommu_should_identity_map(dev, 0)) {
3562 ret = domain_add_dev_info(si_domain, dev);
3564 pr_info("64bit %s uses identity mapping\n",
3574 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3575 size_t size, int dir, u64 dma_mask)
3577 struct dmar_domain *domain;
3578 phys_addr_t start_paddr;
3579 unsigned long iova_pfn;
3582 struct intel_iommu *iommu;
3583 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3585 BUG_ON(dir == DMA_NONE);
3587 if (iommu_no_mapping(dev))
3590 domain = get_valid_domain_for_dev(dev);
3594 iommu = domain_get_iommu(domain);
3595 size = aligned_nrpages(paddr, size);
3597 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3602 * Check if DMAR supports zero-length reads on write only
3605 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3606 !cap_zlr(iommu->cap))
3607 prot |= DMA_PTE_READ;
3608 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3609 prot |= DMA_PTE_WRITE;
3611 * paddr .. paddr + size might cover only part of a page, but we should map
3612 * the whole page. Note: if two parts of one page are mapped separately, we
3613 * might have two guest addresses mapping to the same host paddr, but this
3614 * is not a big problem
3616 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3617 mm_to_dma_pfn(paddr_pfn), size, prot);
3621 /* it's a non-present to present mapping. Only flush if caching mode */
3622 if (cap_caching_mode(iommu->cap))
3623 iommu_flush_iotlb_psi(iommu, domain,
3624 mm_to_dma_pfn(iova_pfn),
3627 iommu_flush_write_buffer(iommu);
3629 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3630 start_paddr += paddr & ~PAGE_MASK;
3635 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3636 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3637 dev_name(dev), size, (unsigned long long)paddr, dir);
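/*
 * Worked example (illustrative): mapping 0x100 bytes at paddr 0x12345678
 * covers a single VTD page, so size becomes 1 after aligned_nrpages(); the
 * returned handle is (iova_pfn << PAGE_SHIFT) + 0x678, i.e. the page offset
 * of the original buffer is preserved while the page itself is remapped.
 */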
3641 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3642 unsigned long offset, size_t size,
3643 enum dma_data_direction dir,
3644 unsigned long attrs)
3646 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3647 dir, *dev->dma_mask);
3650 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3652 struct dmar_domain *domain;
3653 unsigned long start_pfn, last_pfn;
3654 unsigned long nrpages;
3655 unsigned long iova_pfn;
3656 struct intel_iommu *iommu;
3657 struct page *freelist;
3659 if (iommu_no_mapping(dev))
3662 domain = find_domain(dev);
3665 iommu = domain_get_iommu(domain);
3667 iova_pfn = IOVA_PFN(dev_addr);
3669 nrpages = aligned_nrpages(dev_addr, size);
3670 start_pfn = mm_to_dma_pfn(iova_pfn);
3671 last_pfn = start_pfn + nrpages - 1;
3673 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3674 dev_name(dev), start_pfn, last_pfn);
3676 freelist = domain_unmap(domain, start_pfn, last_pfn);
3678 if (intel_iommu_strict) {
3679 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3680 nrpages, !freelist, 0);
3682 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3683 dma_free_pagelist(freelist);
3685 queue_iova(&domain->iovad, iova_pfn, nrpages,
3686 (unsigned long)freelist);
3688 * queue up the release of the unmapped range to save roughly 1/6th of the
3689 * cpu time otherwise used up by the iotlb flush operation...
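/*
 * Which branch is taken above is controlled by intel_iommu_strict (the
 * intel_iommu=strict boot option): in strict mode the IOTLB is flushed and
 * the IOVA freed immediately, otherwise the range is queued and flushed in
 * batches, trading a small window between unmap and IOVA reuse for much
 * lower flush overhead.
 */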
3694 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3695 size_t size, enum dma_data_direction dir,
3696 unsigned long attrs)
3698 intel_unmap(dev, dev_addr, size);
3701 static void *intel_alloc_coherent(struct device *dev, size_t size,
3702 dma_addr_t *dma_handle, gfp_t flags,
3703 unsigned long attrs)
3705 struct page *page = NULL;
3708 size = PAGE_ALIGN(size);
3709 order = get_order(size);
3711 if (!iommu_no_mapping(dev))
3712 flags &= ~(GFP_DMA | GFP_DMA32);
3713 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3714 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3720 if (gfpflags_allow_blocking(flags)) {
3721 unsigned int count = size >> PAGE_SHIFT;
3723 page = dma_alloc_from_contiguous(dev, count, order, flags);
3724 if (page && iommu_no_mapping(dev) &&
3725 page_to_phys(page) + size > dev->coherent_dma_mask) {
3726 dma_release_from_contiguous(dev, page, count);
3732 page = alloc_pages(flags, order);
3735 memset(page_address(page), 0, size);
3737 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3739 dev->coherent_dma_mask);
3741 return page_address(page);
3742 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3743 __free_pages(page, order);
3748 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3749 dma_addr_t dma_handle, unsigned long attrs)
3752 struct page *page = virt_to_page(vaddr);
3754 size = PAGE_ALIGN(size);
3755 order = get_order(size);
3757 intel_unmap(dev, dma_handle, size);
3758 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3759 __free_pages(page, order);
3762 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3763 int nelems, enum dma_data_direction dir,
3764 unsigned long attrs)
3766 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3767 unsigned long nrpages = 0;
3768 struct scatterlist *sg;
3771 for_each_sg(sglist, sg, nelems, i) {
3772 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3775 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3778 static int intel_nontranslate_map_sg(struct device *hddev,
3779 struct scatterlist *sglist, int nelems, int dir)
3782 struct scatterlist *sg;
3784 for_each_sg(sglist, sg, nelems, i) {
3785 BUG_ON(!sg_page(sg));
3786 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3787 sg->dma_length = sg->length;
3792 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3793 enum dma_data_direction dir, unsigned long attrs)
3796 struct dmar_domain *domain;
3799 unsigned long iova_pfn;
3801 struct scatterlist *sg;
3802 unsigned long start_vpfn;
3803 struct intel_iommu *iommu;
3805 BUG_ON(dir == DMA_NONE);
3806 if (iommu_no_mapping(dev))
3807 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3809 domain = get_valid_domain_for_dev(dev);
3813 iommu = domain_get_iommu(domain);
3815 for_each_sg(sglist, sg, nelems, i)
3816 size += aligned_nrpages(sg->offset, sg->length);
3818 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3821 sglist->dma_length = 0;
3826 * Check if DMAR supports zero-length reads on write only
3829 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3830 !cap_zlr(iommu->cap))
3831 prot |= DMA_PTE_READ;
3832 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3833 prot |= DMA_PTE_WRITE;
3835 start_vpfn = mm_to_dma_pfn(iova_pfn);
3837 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3838 if (unlikely(ret)) {
3839 dma_pte_free_pagetable(domain, start_vpfn,
3840 start_vpfn + size - 1,
3841 agaw_to_level(domain->agaw) + 1);
3842 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3846 /* it's a non-present to present mapping. Only flush if caching mode */
3847 if (cap_caching_mode(iommu->cap))
3848 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3850 iommu_flush_write_buffer(iommu);
3855 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3860 const struct dma_map_ops intel_dma_ops = {
3861 .alloc = intel_alloc_coherent,
3862 .free = intel_free_coherent,
3863 .map_sg = intel_map_sg,
3864 .unmap_sg = intel_unmap_sg,
3865 .map_page = intel_map_page,
3866 .unmap_page = intel_unmap_page,
3867 .mapping_error = intel_mapping_error,
3869 .dma_supported = x86_dma_supported,
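/*
 * Sketch of how these ops are reached (assuming the usual setup in which
 * intel_iommu_init() below installs intel_dma_ops as the global dma_ops): a
 * driver calling e.g. dma_map_page(dev, page, off, len, DMA_TO_DEVICE) ends
 * up in intel_map_page() above, and dma_unmap_page() in intel_unmap_page().
 */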
3873 static inline int iommu_domain_cache_init(void)
3877 iommu_domain_cache = kmem_cache_create("iommu_domain",
3878 sizeof(struct dmar_domain),
3883 if (!iommu_domain_cache) {
3884 pr_err("Couldn't create iommu_domain cache\n");
3891 static inline int iommu_devinfo_cache_init(void)
3895 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3896 sizeof(struct device_domain_info),
3900 if (!iommu_devinfo_cache) {
3901 pr_err("Couldn't create devinfo cache\n");
3908 static int __init iommu_init_mempool(void)
3911 ret = iova_cache_get();
3915 ret = iommu_domain_cache_init();
3919 ret = iommu_devinfo_cache_init();
3923 kmem_cache_destroy(iommu_domain_cache);
3930 static void __init iommu_exit_mempool(void)
3932 kmem_cache_destroy(iommu_devinfo_cache);
3933 kmem_cache_destroy(iommu_domain_cache);
3937 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3939 struct dmar_drhd_unit *drhd;
3943 /* We know that this device on this chipset has its own IOMMU.
3944 * If we find it under a different IOMMU, then the BIOS is lying
3945 * to us. Hope that the IOMMU for this device is actually
3946 * disabled, and it needs no translation...
3948 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3950 /* "can't" happen */
3951 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3954 vtbar &= 0xffff0000;
3956 /* we know that this iommu should be at offset 0xa000 from vtbar */
3957 drhd = dmar_find_matched_drhd_unit(pdev);
3958 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3959 TAINT_FIRMWARE_WORKAROUND,
3960 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3961 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3963 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3965 static void __init init_no_remapping_devices(void)
3967 struct dmar_drhd_unit *drhd;
3971 for_each_drhd_unit(drhd) {
3972 if (!drhd->include_all) {
3973 for_each_active_dev_scope(drhd->devices,
3974 drhd->devices_cnt, i, dev)
3976 /* ignore DMAR unit if no devices exist */
3977 if (i == drhd->devices_cnt)
3982 for_each_active_drhd_unit(drhd) {
3983 if (drhd->include_all)
3986 for_each_active_dev_scope(drhd->devices,
3987 drhd->devices_cnt, i, dev)
3988 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3990 if (i < drhd->devices_cnt)
3993 /* This IOMMU has *only* gfx devices. Either bypass it or
3994 set the gfx_mapped flag, as appropriate */
3996 intel_iommu_gfx_mapped = 1;
3999 for_each_active_dev_scope(drhd->devices,
4000 drhd->devices_cnt, i, dev)
4001 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4006 #ifdef CONFIG_SUSPEND
4007 static int init_iommu_hw(void)
4009 struct dmar_drhd_unit *drhd;
4010 struct intel_iommu *iommu = NULL;
4012 for_each_active_iommu(iommu, drhd)
4014 dmar_reenable_qi(iommu);
4016 for_each_iommu(iommu, drhd) {
4017 if (drhd->ignored) {
4019 * we always have to disable PMRs or DMA may fail on
4023 iommu_disable_protect_mem_regions(iommu);
4027 iommu_flush_write_buffer(iommu);
4029 iommu_set_root_entry(iommu);
4031 iommu->flush.flush_context(iommu, 0, 0, 0,
4032 DMA_CCMD_GLOBAL_INVL);
4033 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4034 iommu_enable_translation(iommu);
4035 iommu_disable_protect_mem_regions(iommu);
4041 static void iommu_flush_all(void)
4043 struct dmar_drhd_unit *drhd;
4044 struct intel_iommu *iommu;
4046 for_each_active_iommu(iommu, drhd) {
4047 iommu->flush.flush_context(iommu, 0, 0, 0,
4048 DMA_CCMD_GLOBAL_INVL);
4049 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4050 DMA_TLB_GLOBAL_FLUSH);
4054 static int iommu_suspend(void)
4056 struct dmar_drhd_unit *drhd;
4057 struct intel_iommu *iommu = NULL;
4060 for_each_active_iommu(iommu, drhd) {
4061 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4063 if (!iommu->iommu_state)
4069 for_each_active_iommu(iommu, drhd) {
4070 iommu_disable_translation(iommu);
4072 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4074 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4075 readl(iommu->reg + DMAR_FECTL_REG);
4076 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4077 readl(iommu->reg + DMAR_FEDATA_REG);
4078 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4079 readl(iommu->reg + DMAR_FEADDR_REG);
4080 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4081 readl(iommu->reg + DMAR_FEUADDR_REG);
4083 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4088 for_each_active_iommu(iommu, drhd)
4089 kfree(iommu->iommu_state);
4094 static void iommu_resume(void)
4096 struct dmar_drhd_unit *drhd;
4097 struct intel_iommu *iommu = NULL;
4100 if (init_iommu_hw()) {
4102 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4104 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4108 for_each_active_iommu(iommu, drhd) {
4110 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4112 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4113 iommu->reg + DMAR_FECTL_REG);
4114 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4115 iommu->reg + DMAR_FEDATA_REG);
4116 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4117 iommu->reg + DMAR_FEADDR_REG);
4118 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4119 iommu->reg + DMAR_FEUADDR_REG);
4121 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4124 for_each_active_iommu(iommu, drhd)
4125 kfree(iommu->iommu_state);
4128 static struct syscore_ops iommu_syscore_ops = {
4129 .resume = iommu_resume,
4130 .suspend = iommu_suspend,
4133 static void __init init_iommu_pm_ops(void)
4135 register_syscore_ops(&iommu_syscore_ops);
4139 static inline void init_iommu_pm_ops(void) {}
4140 #endif /* CONFIG_PM */
4143 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4145 struct acpi_dmar_reserved_memory *rmrr;
4146 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
4147 struct dmar_rmrr_unit *rmrru;
4150 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4154 rmrru->hdr = header;
4155 rmrr = (struct acpi_dmar_reserved_memory *)header;
4156 rmrru->base_address = rmrr->base_address;
4157 rmrru->end_address = rmrr->end_address;
4159 length = rmrr->end_address - rmrr->base_address + 1;
4160 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4165 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4166 ((void *)rmrr) + rmrr->header.length,
4167 &rmrru->devices_cnt);
4168 if (rmrru->devices_cnt && rmrru->devices == NULL)
4171 list_add(&rmrru->list, &dmar_rmrr_units);
4182 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4184 struct dmar_atsr_unit *atsru;
4185 struct acpi_dmar_atsr *tmp;
4187 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4188 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4189 if (atsr->segment != tmp->segment)
4191 if (atsr->header.length != tmp->header.length)
4193 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4200 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4202 struct acpi_dmar_atsr *atsr;
4203 struct dmar_atsr_unit *atsru;
4205 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4208 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4209 atsru = dmar_find_atsr(atsr);
4213 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4218 * If memory is allocated from slab by ACPI _DSM method, we need to
4219 * copy the memory content because the memory buffer will be freed
4222 atsru->hdr = (void *)(atsru + 1);
4223 memcpy(atsru->hdr, hdr, hdr->length);
4224 atsru->include_all = atsr->flags & 0x1;
4225 if (!atsru->include_all) {
4226 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4227 (void *)atsr + atsr->header.length,
4228 &atsru->devices_cnt);
4229 if (atsru->devices_cnt && atsru->devices == NULL) {
4235 list_add_rcu(&atsru->list, &dmar_atsr_units);
4240 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4242 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4246 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4248 struct acpi_dmar_atsr *atsr;
4249 struct dmar_atsr_unit *atsru;
4251 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4252 atsru = dmar_find_atsr(atsr);
4254 list_del_rcu(&atsru->list);
4256 intel_iommu_free_atsr(atsru);
4262 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4266 struct acpi_dmar_atsr *atsr;
4267 struct dmar_atsr_unit *atsru;
4269 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4270 atsru = dmar_find_atsr(atsr);
4274 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4275 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4283 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4286 struct intel_iommu *iommu = dmaru->iommu;
4288 if (g_iommus[iommu->seq_id])
4291 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4292 pr_warn("%s: Doesn't support hardware pass through.\n",
4296 if (!ecap_sc_support(iommu->ecap) &&
4297 domain_update_iommu_snooping(iommu)) {
4298 pr_warn("%s: Doesn't support snooping.\n",
4302 sp = domain_update_iommu_superpage(iommu) - 1;
4303 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4304 pr_warn("%s: Doesn't support large page.\n",
4310 * Disable translation if already enabled prior to OS handover.
4312 if (iommu->gcmd & DMA_GCMD_TE)
4313 iommu_disable_translation(iommu);
4315 g_iommus[iommu->seq_id] = iommu;
4316 ret = iommu_init_domains(iommu);
4318 ret = iommu_alloc_root_entry(iommu);
4322 #ifdef CONFIG_INTEL_IOMMU_SVM
4323 if (pasid_enabled(iommu))
4324 intel_svm_alloc_pasid_tables(iommu);
4327 if (dmaru->ignored) {
4329 * we always have to disable PMRs or DMA may fail on this device
4332 iommu_disable_protect_mem_regions(iommu);
4336 intel_iommu_init_qi(iommu);
4337 iommu_flush_write_buffer(iommu);
4339 #ifdef CONFIG_INTEL_IOMMU_SVM
4340 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4341 ret = intel_svm_enable_prq(iommu);
4346 ret = dmar_set_interrupt(iommu);
4350 iommu_set_root_entry(iommu);
4351 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4352 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4353 iommu_enable_translation(iommu);
4355 iommu_disable_protect_mem_regions(iommu);
4359 disable_dmar_iommu(iommu);
4361 free_dmar_iommu(iommu);
4365 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4368 struct intel_iommu *iommu = dmaru->iommu;
4370 if (!intel_iommu_enabled)
4376 ret = intel_iommu_add(dmaru);
4378 disable_dmar_iommu(iommu);
4379 free_dmar_iommu(iommu);
4385 static void intel_iommu_free_dmars(void)
4387 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4388 struct dmar_atsr_unit *atsru, *atsr_n;
4390 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4391 list_del(&rmrru->list);
4392 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4397 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4398 list_del(&atsru->list);
4399 intel_iommu_free_atsr(atsru);
4403 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4406 struct pci_bus *bus;
4407 struct pci_dev *bridge = NULL;
4409 struct acpi_dmar_atsr *atsr;
4410 struct dmar_atsr_unit *atsru;
4412 dev = pci_physfn(dev);
4413 for (bus = dev->bus; bus; bus = bus->parent) {
4415 /* If it's an integrated device, allow ATS */
4418 /* Connected via non-PCIe: no ATS */
4419 if (!pci_is_pcie(bridge) ||
4420 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4422 /* If we found the root port, look it up in the ATSR */
4423 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4428 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4429 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4430 if (atsr->segment != pci_domain_nr(dev->bus))
4433 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4434 if (tmp == &bridge->dev)
4437 if (atsru->include_all)
4447 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4450 struct dmar_rmrr_unit *rmrru;
4451 struct dmar_atsr_unit *atsru;
4452 struct acpi_dmar_atsr *atsr;
4453 struct acpi_dmar_reserved_memory *rmrr;
4455 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4458 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4459 rmrr = container_of(rmrru->hdr,
4460 struct acpi_dmar_reserved_memory, header);
4461 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4462 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4463 ((void *)rmrr) + rmrr->header.length,
4464 rmrr->segment, rmrru->devices,
4465 rmrru->devices_cnt);
4468 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4469 dmar_remove_dev_scope(info, rmrr->segment,
4470 rmrru->devices, rmrru->devices_cnt);
4474 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4475 if (atsru->include_all)
4478 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4479 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4480 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4481 (void *)atsr + atsr->header.length,
4482 atsr->segment, atsru->devices,
4483 atsru->devices_cnt);
4488 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4489 if (dmar_remove_dev_scope(info, atsr->segment,
4490 atsru->devices, atsru->devices_cnt))
4499 * Here we only respond to the action of a device being unbound from its driver.
4501 * A newly added device is not attached to its DMAR domain here yet; that will
4502 * happen when the device is mapped to an iova.
4504 static int device_notifier(struct notifier_block *nb,
4505 unsigned long action, void *data)
4507 struct device *dev = data;
4508 struct dmar_domain *domain;
4510 if (iommu_dummy(dev))
4513 if (action != BUS_NOTIFY_REMOVED_DEVICE)
4516 domain = find_domain(dev);
4520 dmar_remove_one_dev_info(domain, dev);
4521 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4522 domain_exit(domain);
4527 static struct notifier_block device_nb = {
4528 .notifier_call = device_notifier,
4531 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4532 unsigned long val, void *v)
4534 struct memory_notify *mhp = v;
4535 unsigned long long start, end;
4536 unsigned long start_vpfn, last_vpfn;
4539 case MEM_GOING_ONLINE:
4540 start = mhp->start_pfn << PAGE_SHIFT;
4541 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4542 if (iommu_domain_identity_map(si_domain, start, end)) {
4543 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4550 case MEM_CANCEL_ONLINE:
4551 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4552 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4553 while (start_vpfn <= last_vpfn) {
4555 struct dmar_drhd_unit *drhd;
4556 struct intel_iommu *iommu;
4557 struct page *freelist;
4559 iova = find_iova(&si_domain->iovad, start_vpfn);
4561 pr_debug("Failed get IOVA for PFN %lx\n",
4566 iova = split_and_remove_iova(&si_domain->iovad, iova,
4567 start_vpfn, last_vpfn);
4569 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4570 start_vpfn, last_vpfn);
4574 freelist = domain_unmap(si_domain, iova->pfn_lo,
4578 for_each_active_iommu(iommu, drhd)
4579 iommu_flush_iotlb_psi(iommu, si_domain,
4580 iova->pfn_lo, iova_size(iova),
4583 dma_free_pagelist(freelist);
4585 start_vpfn = iova->pfn_hi + 1;
4586 free_iova_mem(iova);
4594 static struct notifier_block intel_iommu_memory_nb = {
4595 .notifier_call = intel_iommu_memory_notifier,
4599 static void free_all_cpu_cached_iovas(unsigned int cpu)
4603 for (i = 0; i < g_num_of_iommus; i++) {
4604 struct intel_iommu *iommu = g_iommus[i];
4605 struct dmar_domain *domain;
4611 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4612 domain = get_iommu_domain(iommu, (u16)did);
4616 free_cpu_cached_iovas(cpu, &domain->iovad);
4621 static int intel_iommu_cpu_dead(unsigned int cpu)
4623 free_all_cpu_cached_iovas(cpu);
4627 static void intel_disable_iommus(void)
4629 struct intel_iommu *iommu = NULL;
4630 struct dmar_drhd_unit *drhd;
4632 for_each_iommu(iommu, drhd)
4633 iommu_disable_translation(iommu);
4636 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4638 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4640 return container_of(iommu_dev, struct intel_iommu, iommu);
4643 static ssize_t intel_iommu_show_version(struct device *dev,
4644 struct device_attribute *attr,
4647 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4648 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4649 return sprintf(buf, "%d:%d\n",
4650 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4652 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4654 static ssize_t intel_iommu_show_address(struct device *dev,
4655 struct device_attribute *attr,
4658 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4659 return sprintf(buf, "%llx\n", iommu->reg_phys);
4661 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4663 static ssize_t intel_iommu_show_cap(struct device *dev,
4664 struct device_attribute *attr,
4667 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4668 return sprintf(buf, "%llx\n", iommu->cap);
4670 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4672 static ssize_t intel_iommu_show_ecap(struct device *dev,
4673 struct device_attribute *attr,
4676 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4677 return sprintf(buf, "%llx\n", iommu->ecap);
4679 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4681 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4682 struct device_attribute *attr,
4685 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4686 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4688 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4690 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4691 struct device_attribute *attr,
4694 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4695 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4696 cap_ndoms(iommu->cap)));
4698 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4700 static struct attribute *intel_iommu_attrs[] = {
4701 &dev_attr_version.attr,
4702 &dev_attr_address.attr,
4704 &dev_attr_ecap.attr,
4705 &dev_attr_domains_supported.attr,
4706 &dev_attr_domains_used.attr,
4710 static struct attribute_group intel_iommu_group = {
4711 .name = "intel-iommu",
4712 .attrs = intel_iommu_attrs,
4715 const struct attribute_group *intel_iommu_groups[] = {
4720 int __init intel_iommu_init(void)
4723 struct dmar_drhd_unit *drhd;
4724 struct intel_iommu *iommu;
4726 /* VT-d is required for a TXT/tboot launch, so enforce that */
4727 force_on = tboot_force_iommu();
4729 if (iommu_init_mempool()) {
4731 panic("tboot: Failed to initialize iommu memory\n");
4735 down_write(&dmar_global_lock);
4736 if (dmar_table_init()) {
4738 panic("tboot: Failed to initialize DMAR table\n");
4742 if (dmar_dev_scope_init() < 0) {
4744 panic("tboot: Failed to initialize DMAR device scope\n");
4748 if (no_iommu || dmar_disabled) {
4750 * We exit the function here to ensure IOMMU's remapping and
4751 * mempool aren't setup, which means that the IOMMU's PMRs
4752 * won't be disabled via the call to init_dmars(). So disable
4753 * it explicitly here. The PMRs were setup by tboot prior to
4754 * calling SENTER, but the kernel is expected to reset/tear
4757 if (intel_iommu_tboot_noforce) {
4758 for_each_iommu(iommu, drhd)
4759 iommu_disable_protect_mem_regions(iommu);
4763 * Make sure the IOMMUs are switched off, even when we
4764 * boot into a kexec kernel and the previous kernel left
4767 intel_disable_iommus();
4771 if (list_empty(&dmar_rmrr_units))
4772 pr_info("No RMRR found\n");
4774 if (list_empty(&dmar_atsr_units))
4775 pr_info("No ATSR found\n");
4777 if (dmar_init_reserved_ranges()) {
4779 panic("tboot: Failed to reserve iommu ranges\n");
4780 goto out_free_reserved_range;
4783 init_no_remapping_devices();
4788 panic("tboot: Failed to initialize DMARs\n");
4789 pr_err("Initialization failed\n");
4790 goto out_free_reserved_range;
4792 up_write(&dmar_global_lock);
4793 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4795 #ifdef CONFIG_SWIOTLB
4798 dma_ops = &intel_dma_ops;
4800 init_iommu_pm_ops();
4802 for_each_active_iommu(iommu, drhd) {
4803 iommu_device_sysfs_add(&iommu->iommu, NULL,
4806 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4807 iommu_device_register(&iommu->iommu);
4810 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4811 bus_register_notifier(&pci_bus_type, &device_nb);
4812 if (si_domain && !hw_pass_through)
4813 register_memory_notifier(&intel_iommu_memory_nb);
4814 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4815 intel_iommu_cpu_dead);
4816 intel_iommu_enabled = 1;
4820 out_free_reserved_range:
4821 put_iova_domain(&reserved_iova_list);
4823 intel_iommu_free_dmars();
4824 up_write(&dmar_global_lock);
4825 iommu_exit_mempool();
4829 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4831 struct intel_iommu *iommu = opaque;
4833 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4838 * NB - intel-iommu lacks any sort of reference counting for the users of
4839 * dependent devices. If multiple endpoints have intersecting dependent
4840 * devices, unbinding the driver from any one of them will possibly leave
4841 * the others unable to operate.
4843 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4845 if (!iommu || !dev || !dev_is_pci(dev))
4848 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4851 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4853 struct intel_iommu *iommu;
4854 unsigned long flags;
4856 assert_spin_locked(&device_domain_lock);
4861 iommu = info->iommu;
4864 iommu_disable_dev_iotlb(info);
4865 domain_context_clear(iommu, info->dev);
4868 unlink_domain_info(info);
4870 spin_lock_irqsave(&iommu->lock, flags);
4871 domain_detach_iommu(info->domain, iommu);
4872 spin_unlock_irqrestore(&iommu->lock, flags);
4874 free_devinfo_mem(info);
4877 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4880 struct device_domain_info *info;
4881 unsigned long flags;
4883 spin_lock_irqsave(&device_domain_lock, flags);
4884 info = dev->archdata.iommu;
4885 __dmar_remove_one_dev_info(info);
4886 spin_unlock_irqrestore(&device_domain_lock, flags);
4889 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4893 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4895 domain_reserve_special_ranges(domain);
4897 /* calculate AGAW */
4898 domain->gaw = guest_width;
4899 adjust_width = guestwidth_to_adjustwidth(guest_width);
4900 domain->agaw = width_to_agaw(adjust_width);
4902 domain->iommu_coherency = 0;
4903 domain->iommu_snooping = 0;
4904 domain->iommu_superpage = 0;
4905 domain->max_addr = 0;
4907 /* always allocate the top pgd */
4908 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4911 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4915 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4917 struct dmar_domain *dmar_domain;
4918 struct iommu_domain *domain;
4920 if (type != IOMMU_DOMAIN_UNMANAGED)
4923 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4925 pr_err("Can't allocate dmar_domain\n");
4928 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4929 pr_err("Domain initialization failed\n");
4930 domain_exit(dmar_domain);
4933 domain_update_iommu_cap(dmar_domain);
4935 domain = &dmar_domain->domain;
4936 domain->geometry.aperture_start = 0;
4937 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4938 domain->geometry.force_aperture = true;
4943 static void intel_iommu_domain_free(struct iommu_domain *domain)
4945 domain_exit(to_dmar_domain(domain));
4948 static int intel_iommu_attach_device(struct iommu_domain *domain,
4951 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4952 struct intel_iommu *iommu;
4956 if (device_is_rmrr_locked(dev)) {
4957 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4961 /* normally dev is not mapped */
4962 if (unlikely(domain_context_mapped(dev))) {
4963 struct dmar_domain *old_domain;
4965 old_domain = find_domain(dev);
4968 dmar_remove_one_dev_info(old_domain, dev);
4971 if (!domain_type_is_vm_or_si(old_domain) &&
4972 list_empty(&old_domain->devices))
4973 domain_exit(old_domain);
4977 iommu = device_to_iommu(dev, &bus, &devfn);
4981 /* check if this iommu agaw is sufficient for max mapped address */
4982 addr_width = agaw_to_width(iommu->agaw);
4983 if (addr_width > cap_mgaw(iommu->cap))
4984 addr_width = cap_mgaw(iommu->cap);
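/*
 * Example (added): the unit's own agaw may advertise 48 bits while
 * cap_mgaw() only guarantees 39, in which case addr_width is clamped to 39
 * and a domain whose max_addr already exceeds 1LL << 39 is rejected below.
 */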
4986 if (dmar_domain->max_addr > (1LL << addr_width)) {
4987 pr_err("%s: iommu width (%d) is not "
4988 "sufficient for the mapped address (%llx)\n",
4989 __func__, addr_width, dmar_domain->max_addr);
4992 dmar_domain->gaw = addr_width;
4995 * Knock out extra levels of page tables if necessary
4997 while (iommu->agaw < dmar_domain->agaw) {
4998 struct dma_pte *pte;
5000 pte = dmar_domain->pgd;
5001 if (dma_pte_present(pte)) {
5002 dmar_domain->pgd = (struct dma_pte *)
5003 phys_to_virt(dma_pte_addr(pte));
5004 free_pgtable_page(pte);
5006 dmar_domain->agaw--;
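/*
 * Note (added): each pass above drops the current top-level table and
 * promotes the table its first entry points to; because the domain now
 * fits within the narrower address width, only that first slot can be
 * populated, so no existing mappings are lost.
 */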
5009 return domain_add_dev_info(dmar_domain, dev);
5012 static void intel_iommu_detach_device(struct iommu_domain *domain,
5015 dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
5018 static int intel_iommu_map(struct iommu_domain *domain,
5019 unsigned long iova, phys_addr_t hpa,
5020 size_t size, int iommu_prot)
5022 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5027 if (iommu_prot & IOMMU_READ)
5028 prot |= DMA_PTE_READ;
5029 if (iommu_prot & IOMMU_WRITE)
5030 prot |= DMA_PTE_WRITE;
5031 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5032 prot |= DMA_PTE_SNP;
5034 max_addr = iova + size;
5035 if (dmar_domain->max_addr < max_addr) {
5038 /* check if minimum agaw is sufficient for mapped address */
5039 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5040 if (end < max_addr) {
5041 pr_err("%s: iommu width (%d) is not "
5042 "sufficient for the mapped address (%llx)\n",
5043 __func__, dmar_domain->gaw, max_addr);
5046 dmar_domain->max_addr = max_addr;
5048 /* Round up size to next multiple of PAGE_SIZE, if it and
5049 the low bits of hpa would take us onto the next page */
5050 size = aligned_nrpages(hpa, size);
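/*
 * Worked example (illustrative): hpa == 0x1234 with size == 0x2000 starts
 * 0x234 bytes into a page and therefore spills into a third 4KiB page, so
 * aligned_nrpages() returns 3 rather than size >> VTD_PAGE_SHIFT == 2.
 */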
5051 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5052 hpa >> VTD_PAGE_SHIFT, size, prot);
5056 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5057 unsigned long iova, size_t size)
5059 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5060 struct page *freelist = NULL;
5061 struct intel_iommu *iommu;
5062 unsigned long start_pfn, last_pfn;
5063 unsigned int npages;
5064 int iommu_id, level = 0;
5066 /* Cope with horrid API which requires us to unmap more than the
5067 size argument if it happens to be a large-page mapping. */
5068 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5070 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5071 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
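/*
 * Example (added): a 4KiB unmap request that lands inside a 2MiB superpage
 * PTE comes back with level == 2, so size is bumped to
 * VTD_PAGE_SIZE << 9 == 2MiB and the entire superpage is unmapped.
 */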
5073 start_pfn = iova >> VTD_PAGE_SHIFT;
5074 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5076 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5078 npages = last_pfn - start_pfn + 1;
5080 for_each_domain_iommu(iommu_id, dmar_domain) {
5081 iommu = g_iommus[iommu_id];
5083 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5084 start_pfn, npages, !freelist, 0);
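/*
 * Comment added: the page-table pages collected in freelist are only
 * returned to the allocator after every IOMMU serving this domain has
 * flushed its IOTLB, so no in-flight translation can still walk them.
 */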
5087 dma_free_pagelist(freelist);
5089 if (dmar_domain->max_addr == iova + size)
5090 dmar_domain->max_addr = iova;
5095 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5098 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5099 struct dma_pte *pte;
5103 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5105 phys = dma_pte_addr(pte);
5110 static bool intel_iommu_capable(enum iommu_cap cap)
5112 if (cap == IOMMU_CAP_CACHE_COHERENCY)
5113 return domain_update_iommu_snooping(NULL) == 1;
5114 if (cap == IOMMU_CAP_INTR_REMAP)
5115 return irq_remapping_enabled == 1;
5120 static int intel_iommu_add_device(struct device *dev)
5122 struct intel_iommu *iommu;
5123 struct iommu_group *group;
5126 iommu = device_to_iommu(dev, &bus, &devfn);
5130 iommu_device_link(&iommu->iommu, dev);
5132 group = iommu_group_get_for_dev(dev);
5135 return PTR_ERR(group);
5137 iommu_group_put(group);
5141 static void intel_iommu_remove_device(struct device *dev)
5143 struct intel_iommu *iommu;
5146 iommu = device_to_iommu(dev, &bus, &devfn);
5150 iommu_group_remove_device(dev);
5152 iommu_device_unlink(&iommu->iommu, dev);
5155 static void intel_iommu_get_resv_regions(struct device *device,
5156 struct list_head *head)
5158 struct iommu_resv_region *reg;
5159 struct dmar_rmrr_unit *rmrr;
5160 struct device *i_dev;
5164 for_each_rmrr_units(rmrr) {
5165 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5167 if (i_dev != device)
5170 list_add_tail(&rmrr->resv->list, head);
5175 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5176 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, 0, IOMMU_RESV_MSI);
5180 list_add_tail(&reg->list, head);
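/*
 * Comment added: the per-device RMRR ranges above and this fixed
 * IOAPIC/MSI window (0xfee00000-0xfeefffff) are reported as reserved so
 * that the IOMMU core and its users (e.g. VFIO) never hand them out as
 * ordinary IOVA space.
 */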
5183 static void intel_iommu_put_resv_regions(struct device *dev,
5184 struct list_head *head)
5186 struct iommu_resv_region *entry, *next;
5188 list_for_each_entry_safe(entry, next, head, list) {
5189 if (entry->type == IOMMU_RESV_RESERVED)
5194 #ifdef CONFIG_INTEL_IOMMU_SVM
5195 #define MAX_NR_PASID_BITS (20)
5196 static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
5199 * Convert ecap_pss to extended context entry pts encoding, also
5200 * respect the soft pasid_max value set by the iommu.
5201 * - number of PASID bits = ecap_pss + 1
5202 * - number of PASID table entries = 2^(pts + 5)
5203 * Therefore, pts = ecap_pss - 4
5204 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
5206 if (ecap_pss(iommu->ecap) < 5)
5209 /* pasid_max is encoded as actual number of entries not the bits */
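/*
 * Illustrative example (not from the original): a soft pasid_max of
 * 0x4000 (16384 entries, a power of two) has its lowest set bit at 14,
 * so the return below yields pts = 14 - 5 = 9 and the hardware sizes the
 * PASID table as 2^(9 + 5) = 16384 entries.
 */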
5210 return find_first_bit((unsigned long *)&iommu->pasid_max,
5211 MAX_NR_PASID_BITS) - 5;
5214 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5216 struct device_domain_info *info;
5217 struct context_entry *context;
5218 struct dmar_domain *domain;
5219 unsigned long flags;
5223 domain = get_valid_domain_for_dev(sdev->dev);
5227 spin_lock_irqsave(&device_domain_lock, flags);
5228 spin_lock(&iommu->lock);
5231 info = sdev->dev->archdata.iommu;
5232 if (!info || !info->pasid_supported)
5235 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5236 if (WARN_ON(!context))
5239 ctx_lo = context[0].lo;
5241 sdev->did = domain->iommu_did[iommu->seq_id];
5242 sdev->sid = PCI_DEVID(info->bus, info->devfn);
5244 if (!(ctx_lo & CONTEXT_PASIDE)) {
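/*
 * Comment added (describing the extended-context layout as used here):
 * these writes fill the upper 128 bits of the 256-bit extended context
 * entry, with context[1].hi pointing at the PASID state table and
 * context[1].lo at the PASID table, its low bits encoding the size (pts).
 */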
5245 if (iommu->pasid_state_table)
5246 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
5247 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
5248 intel_iommu_get_pts(iommu);
5251 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
5252 * extended to permit requests-with-PASID if the PASIDE bit
5253 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
5254 * however, the PASIDE bit is ignored and requests-with-PASID
5255 * are unconditionally blocked. Which makes less sense.
5256 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
5257 * "guest mode" translation types depending on whether ATS
5258 * is available or not. Annoyingly, we can't use the new
5259 * modes *unless* PASIDE is set. */
5260 if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
5261 ctx_lo &= ~CONTEXT_TT_MASK;
5262 if (info->ats_supported)
5263 ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
5265 ctx_lo |= CONTEXT_TT_PT_PASID << 2;
5267 ctx_lo |= CONTEXT_PASIDE;
5268 if (iommu->pasid_state_table)
5269 ctx_lo |= CONTEXT_DINVE;
5270 if (info->pri_supported)
5271 ctx_lo |= CONTEXT_PRS;
5272 context[0].lo = ctx_lo;
5274 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
5275 DMA_CCMD_MASK_NOBIT,
5276 DMA_CCMD_DEVICE_INVL);
5279 /* Enable PASID support in the device, if it wasn't already */
5280 if (!info->pasid_enabled)
5281 iommu_enable_dev_iotlb(info);
5283 if (info->ats_enabled) {
5284 sdev->dev_iotlb = 1;
5285 sdev->qdep = info->ats_qdep;
5286 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
5292 spin_unlock(&iommu->lock);
5293 spin_unlock_irqrestore(&device_domain_lock, flags);
5298 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5300 struct intel_iommu *iommu;
5303 if (iommu_dummy(dev)) {
5305 "No IOMMU translation for device; cannot enable SVM\n");
5309 iommu = device_to_iommu(dev, &bus, &devfn);
5311 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
5315 if (!iommu->pasid_table) {
5316 dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
5322 #endif /* CONFIG_INTEL_IOMMU_SVM */
5324 const struct iommu_ops intel_iommu_ops = {
5325 .capable = intel_iommu_capable,
5326 .domain_alloc = intel_iommu_domain_alloc,
5327 .domain_free = intel_iommu_domain_free,
5328 .attach_dev = intel_iommu_attach_device,
5329 .detach_dev = intel_iommu_detach_device,
5330 .map = intel_iommu_map,
5331 .unmap = intel_iommu_unmap,
5332 .map_sg = default_iommu_map_sg,
5333 .iova_to_phys = intel_iommu_iova_to_phys,
5334 .add_device = intel_iommu_add_device,
5335 .remove_device = intel_iommu_remove_device,
5336 .get_resv_regions = intel_iommu_get_resv_regions,
5337 .put_resv_regions = intel_iommu_put_resv_regions,
5338 .device_group = pci_device_group,
5339 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
5342 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5344 /* G4x/GM45 integrated gfx dmar support is totally busted. */
5345 pr_info("Disabling IOMMU for graphics on this chipset\n");
5349 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5350 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5351 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5352 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5353 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5354 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5355 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5357 static void quirk_iommu_rwbf(struct pci_dev *dev)
5360 * Mobile 4 Series Chipset neglects to set RWBF capability,
5361 * but needs it. Same seems to hold for the desktop versions.
5363 pr_info("Forcing write-buffer flush capability\n");
5367 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5368 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5369 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5370 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5371 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5372 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5373 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5376 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
5377 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5378 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
5379 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
5380 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5381 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5382 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5383 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
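/*
 * Example decode (added): a GGC readout with bits 11:8 == 0x9
 * (GGC_MEMORY_SIZE_2M_VT) has GGC_MEMORY_VT_ENABLED set, so the quirk
 * below only forces strict IOTLB flushing when graphics mapping is
 * enabled; bits 11:8 == 0x1 (GGC_MEMORY_SIZE_1M) means the BIOS left no
 * room for the VT shadow GTT and graphics translation is disabled instead.
 */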
5385 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5389 if (pci_read_config_word(dev, GGC, &ggc))
5392 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5393 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5395 } else if (dmar_map_gfx) {
5396 /* we have to ensure the gfx device is idle before we flush */
5397 pr_info("Disabling batched IOTLB flush on Ironlake\n");
5398 intel_iommu_strict = 1;
5401 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5402 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5403 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5404 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5406 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5407 ISOCH DMAR unit for the Azalia sound device, but not give it any
5408 TLB entries, which causes it to deadlock. Check for that. We do
5409 this in a function called from init_dmars(), instead of in a PCI
5410 quirk, because we don't want to print the obnoxious "BIOS broken"
5411 message if VT-d is actually disabled.
5413 static void __init check_tylersburg_isoch(void)
5415 struct pci_dev *pdev;
5416 uint32_t vtisochctrl;
5418 /* If there's no Azalia in the system anyway, forget it. */
5419 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5424 /* System Management Registers. Might be hidden, in which case
5425 we can't do the sanity check. But that's OK, because the
5426 known-broken BIOSes _don't_ actually hide it, so far. */
5427 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5431 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5438 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5439 if (vtisochctrl & 1)
5442 /* Drop all bits other than the number of TLB entries */
5443 vtisochctrl &= 0x1c;
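/*
 * Comment added, matching the checks around it: bit 0 (tested above) set
 * means Azalia DMA is routed to the non-isoch DMAR unit and nothing more
 * needs doing; otherwise the 0x1c-masked value is treated as the number
 * of TLB entries given to the isoch unit, where 0x10 (16) is the
 * recommended allocation and 0 guarantees a deadlock.
 */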
5445 /* If we have the recommended number of TLB entries (16), fine. */
5446 if (vtisochctrl == 0x10)
5449 /* Zero TLB entries? You get to ride the short bus to school. */
5451 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5452 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5453 dmi_get_system_info(DMI_BIOS_VENDOR),
5454 dmi_get_system_info(DMI_BIOS_VERSION),
5455 dmi_get_system_info(DMI_PRODUCT_VERSION));
5456 iommu_identity_mapping |= IDENTMAP_AZALIA;
5460 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",