2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
18 * Joerg Roedel <jroedel@suse.de>
21 #define pr_fmt(fmt) "DMAR: " fmt
22 #define dev_fmt(fmt) pr_fmt(fmt)
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/export.h>
28 #include <linux/slab.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/spinlock.h>
32 #include <linux/pci.h>
33 #include <linux/dmar.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/mempool.h>
36 #include <linux/memory.h>
37 #include <linux/cpu.h>
38 #include <linux/timer.h>
40 #include <linux/iova.h>
41 #include <linux/iommu.h>
42 #include <linux/intel-iommu.h>
43 #include <linux/syscore_ops.h>
44 #include <linux/tboot.h>
45 #include <linux/dmi.h>
46 #include <linux/pci-ats.h>
47 #include <linux/memblock.h>
48 #include <linux/dma-contiguous.h>
49 #include <linux/dma-direct.h>
50 #include <linux/crash_dump.h>
51 #include <linux/numa.h>
52 #include <asm/irq_remapping.h>
53 #include <asm/cacheflush.h>
54 #include <asm/iommu.h>
56 #include "irq_remapping.h"
57 #include "intel-pasid.h"
59 #define ROOT_SIZE VTD_PAGE_SIZE
60 #define CONTEXT_SIZE VTD_PAGE_SIZE
62 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
63 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
64 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
65 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
67 #define IOAPIC_RANGE_START (0xfee00000)
68 #define IOAPIC_RANGE_END (0xfeefffff)
69 #define IOVA_START_ADDR (0x1000)
71 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
73 #define MAX_AGAW_WIDTH 64
74 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
76 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
77 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
79 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
80 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
81 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
82 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
83 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
85 /* IO virtual address start page frame number */
86 #define IOVA_START_PFN (1)
88 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
90 /* page table handling */
91 #define LEVEL_STRIDE (9)
92 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
* This bitmap is used to advertise the page sizes our hardware supports
* to the IOMMU core, which will then use this information to split
* physically contiguous memory regions it is mapping into page sizes
* that we support.
100 * Traditionally the IOMMU core just handed us the mappings directly,
101 * after making sure the size is an order of a 4KiB page and that the
102 * mapping has natural alignment.
104 * To retain this behavior, we currently advertise that we support
105 * all page sizes that are an order of 4KiB.
107 * If at some point we'd like to utilize the IOMMU core's new behavior,
108 * we could change this to advertise the real page sizes we support.
110 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
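/*
 * For example, with 4KiB VT-d pages, ~0xFFFUL leaves every bit from bit 12
 * upwards set, i.e. we claim support for 4KiB, 8KiB, 16KiB, ... mappings, so
 * the IOMMU core never has to split a physically contiguous region on our
 * behalf.
 *
 * The AGAW helpers below encode the VT-d address-width relationship
 * width = 30 + 9 * agaw: agaw 1 is a 39-bit/3-level table, agaw 2 is
 * 48-bit/4-level, and agaw 3 is 57-bit/5-level.
 */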
112 static inline int agaw_to_level(int agaw)
117 static inline int agaw_to_width(int agaw)
119 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
122 static inline int width_to_agaw(int width)
124 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
127 static inline unsigned int level_to_offset_bits(int level)
129 return (level - 1) * LEVEL_STRIDE;
132 static inline int pfn_level_offset(unsigned long pfn, int level)
134 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
137 static inline unsigned long level_mask(int level)
139 return -1UL << level_to_offset_bits(level);
142 static inline unsigned long level_size(int level)
144 return 1UL << level_to_offset_bits(level);
147 static inline unsigned long align_to_level(unsigned long pfn, int level)
149 return (pfn + level_size(level) - 1) & level_mask(level);
152 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
154 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
157 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
158 are never going to work. */
159 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
161 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
164 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
166 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
168 static inline unsigned long page_to_dma_pfn(struct page *pg)
170 return mm_to_dma_pfn(page_to_pfn(pg));
172 static inline unsigned long virt_to_dma_pfn(void *p)
174 return page_to_dma_pfn(virt_to_page(p));
177 /* global iommu list, set NULL for ignored DMAR units */
178 static struct intel_iommu **g_iommus;
180 static void __init check_tylersburg_isoch(void);
181 static int rwbf_quirk;
* set to 1 to panic the kernel if VT-d cannot be successfully enabled
185 * (used when kernel is launched w/ TXT)
187 static int force_on = 0;
188 int intel_iommu_tboot_noforce;
189 static int no_platform_optin;
191 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
194 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
197 static phys_addr_t root_entry_lctp(struct root_entry *re)
202 return re->lo & VTD_PAGE_MASK;
206 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
209 static phys_addr_t root_entry_uctp(struct root_entry *re)
214 return re->hi & VTD_PAGE_MASK;
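/*
 * The helpers below manipulate the 128-bit legacy context entry directly:
 * in the low qword, bit 0 is Present, bit 1 Fault Processing Disable,
 * bits 2-3 the Translation Type, bit 11 the (pre-scalable-mode) PASID
 * enable, and bits 12-63 the second-level page-table address; in the high
 * qword, bits 0-2 hold the Address Width (AGAW), bit 3 is used by this
 * driver to mark entries copied from a previous (kdump) kernel, and
 * bits 8-23 the Domain-ID.
 */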
217 static inline void context_clear_pasid_enable(struct context_entry *context)
219 context->lo &= ~(1ULL << 11);
222 static inline bool context_pasid_enabled(struct context_entry *context)
224 return !!(context->lo & (1ULL << 11));
227 static inline void context_set_copied(struct context_entry *context)
229 context->hi |= (1ull << 3);
232 static inline bool context_copied(struct context_entry *context)
234 return !!(context->hi & (1ULL << 3));
237 static inline bool __context_present(struct context_entry *context)
239 return (context->lo & 1);
242 bool context_present(struct context_entry *context)
244 return context_pasid_enabled(context) ?
245 __context_present(context) :
246 __context_present(context) && !context_copied(context);
249 static inline void context_set_present(struct context_entry *context)
254 static inline void context_set_fault_enable(struct context_entry *context)
256 context->lo &= (((u64)-1) << 2) | 1;
259 static inline void context_set_translation_type(struct context_entry *context,
262 context->lo &= (((u64)-1) << 4) | 3;
263 context->lo |= (value & 3) << 2;
266 static inline void context_set_address_root(struct context_entry *context,
269 context->lo &= ~VTD_PAGE_MASK;
270 context->lo |= value & VTD_PAGE_MASK;
273 static inline void context_set_address_width(struct context_entry *context,
276 context->hi |= value & 7;
279 static inline void context_set_domain_id(struct context_entry *context,
282 context->hi |= (value & ((1 << 16) - 1)) << 8;
285 static inline int context_domain_id(struct context_entry *c)
287 return((c->hi >> 8) & 0xffff);
290 static inline void context_clear_entry(struct context_entry *context)
* This domain is a static identity mapping domain.
*	1. This domain creates a static 1:1 mapping to all usable memory.
*	2. It maps to each iommu if successful.
*	3. Each iommu maps to this domain if successful.
302 static struct dmar_domain *si_domain;
303 static int hw_pass_through = 1;
/* si_domain contains multiple devices */
306 #define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
309 * This is a DMA domain allocated through the iommu domain allocation
310 * interface. But one or more devices belonging to this domain have
* been chosen to use a private domain. We should avoid using the
* map/unmap/iova_to_phys APIs on it.
314 #define DOMAIN_FLAG_LOSE_CHILDREN BIT(1)
316 #define for_each_domain_iommu(idx, domain) \
317 for (idx = 0; idx < g_num_of_iommus; idx++) \
318 if (domain->iommu_refcnt[idx])
320 struct dmar_rmrr_unit {
321 struct list_head list; /* list of rmrr units */
322 struct acpi_dmar_header *hdr; /* ACPI header */
323 u64 base_address; /* reserved base address*/
324 u64 end_address; /* reserved end address */
325 struct dmar_dev_scope *devices; /* target devices */
326 int devices_cnt; /* target device count */
329 struct dmar_atsr_unit {
330 struct list_head list; /* list of ATSR units */
331 struct acpi_dmar_header *hdr; /* ACPI header */
332 struct dmar_dev_scope *devices; /* target devices */
333 int devices_cnt; /* target device count */
334 u8 include_all:1; /* include all ports */
337 static LIST_HEAD(dmar_atsr_units);
338 static LIST_HEAD(dmar_rmrr_units);
340 #define for_each_rmrr_units(rmrr) \
341 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
343 /* bitmap for indexing intel_iommus */
344 static int g_num_of_iommus;
346 static void domain_exit(struct dmar_domain *domain);
347 static void domain_remove_dev_info(struct dmar_domain *domain);
348 static void dmar_remove_one_dev_info(struct device *dev);
349 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
350 static void domain_context_clear(struct intel_iommu *iommu,
352 static int domain_detach_iommu(struct dmar_domain *domain,
353 struct intel_iommu *iommu);
354 static bool device_is_rmrr_locked(struct device *dev);
355 static int intel_iommu_attach_device(struct iommu_domain *domain,
358 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
359 int dmar_disabled = 0;
361 int dmar_disabled = 1;
362 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
365 int intel_iommu_enabled = 0;
366 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
368 static int dmar_map_gfx = 1;
369 static int dmar_forcedac;
370 static int intel_iommu_strict;
371 static int intel_iommu_superpage = 1;
372 static int iommu_identity_mapping;
374 #define IDENTMAP_ALL 1
375 #define IDENTMAP_GFX 2
376 #define IDENTMAP_AZALIA 4
378 int intel_iommu_gfx_mapped;
379 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
381 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
382 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
383 static DEFINE_SPINLOCK(device_domain_lock);
384 static LIST_HEAD(device_domain_list);
387 * Iterate over elements in device_domain_list and call the specified
388 * callback @fn against each element.
390 int for_each_device_domain(int (*fn)(struct device_domain_info *info,
391 void *data), void *data)
395 struct device_domain_info *info;
397 spin_lock_irqsave(&device_domain_lock, flags);
398 list_for_each_entry(info, &device_domain_list, global) {
399 ret = fn(info, data);
401 spin_unlock_irqrestore(&device_domain_lock, flags);
405 spin_unlock_irqrestore(&device_domain_lock, flags);
410 const struct iommu_ops intel_iommu_ops;
412 static bool translation_pre_enabled(struct intel_iommu *iommu)
414 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
417 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
419 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
422 static void init_translation_status(struct intel_iommu *iommu)
426 gsts = readl(iommu->reg + DMAR_GSTS_REG);
427 if (gsts & DMA_GSTS_TES)
428 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
/* Convert a generic 'struct iommu_domain' to a private 'struct dmar_domain' */
432 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
434 return container_of(dom, struct dmar_domain, domain);
437 static int __init intel_iommu_setup(char *str)
442 if (!strncmp(str, "on", 2)) {
444 pr_info("IOMMU enabled\n");
445 } else if (!strncmp(str, "off", 3)) {
447 no_platform_optin = 1;
448 pr_info("IOMMU disabled\n");
449 } else if (!strncmp(str, "igfx_off", 8)) {
451 pr_info("Disable GFX device mapping\n");
452 } else if (!strncmp(str, "forcedac", 8)) {
453 pr_info("Forcing DAC for PCI devices\n");
455 } else if (!strncmp(str, "strict", 6)) {
456 pr_info("Disable batched IOTLB flush\n");
457 intel_iommu_strict = 1;
458 } else if (!strncmp(str, "sp_off", 6)) {
459 pr_info("Disable supported super page\n");
460 intel_iommu_superpage = 0;
461 } else if (!strncmp(str, "sm_on", 5)) {
462 pr_info("Intel-IOMMU: scalable mode supported\n");
464 } else if (!strncmp(str, "tboot_noforce", 13)) {
466 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
467 intel_iommu_tboot_noforce = 1;
470 str += strcspn(str, ",");
476 __setup("intel_iommu=", intel_iommu_setup);
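/*
 * Options are comma-separated, e.g. booting with "intel_iommu=on,sm_on"
 * enables the IOMMU and opts in to scalable mode where supported.
 */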
478 static struct kmem_cache *iommu_domain_cache;
479 static struct kmem_cache *iommu_devinfo_cache;
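/*
 * iommu->domains is a two-level lookup table: the top level holds pointers
 * to 256-entry pages indexed by the high bits of the domain-id, and the low
 * 8 bits index within a page. Pages other than the first are allocated
 * lazily in set_iommu_domain().
 */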
481 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
483 struct dmar_domain **domains;
486 domains = iommu->domains[idx];
490 return domains[did & 0xff];
493 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
494 struct dmar_domain *domain)
496 struct dmar_domain **domains;
499 if (!iommu->domains[idx]) {
500 size_t size = 256 * sizeof(struct dmar_domain *);
501 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
504 domains = iommu->domains[idx];
505 if (WARN_ON(!domains))
508 domains[did & 0xff] = domain;
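/* Allocate a zeroed, node-local page for use as a VT-d root, context or
 * page-table page. */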
511 void *alloc_pgtable_page(int node)
516 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
518 vaddr = page_address(page);
522 void free_pgtable_page(void *vaddr)
524 free_page((unsigned long)vaddr);
527 static inline void *alloc_domain_mem(void)
529 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
532 static void free_domain_mem(void *vaddr)
534 kmem_cache_free(iommu_domain_cache, vaddr);
537 static inline void * alloc_devinfo_mem(void)
539 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
542 static inline void free_devinfo_mem(void *vaddr)
544 kmem_cache_free(iommu_devinfo_cache, vaddr);
547 static inline int domain_type_is_si(struct dmar_domain *domain)
549 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
552 static inline int domain_pfn_supported(struct dmar_domain *domain,
555 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
557 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
560 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
565 sagaw = cap_sagaw(iommu->cap);
566 for (agaw = width_to_agaw(max_gaw);
568 if (test_bit(agaw, &sagaw))
576 * Calculate max SAGAW for each iommu.
578 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
580 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
* Calculate the AGAW for each iommu.
* "SAGAW" may be different across iommus; use a default AGAW, and
* fall back to a smaller supported AGAW for iommus that don't support
* the default one.
588 int iommu_calculate_agaw(struct intel_iommu *iommu)
590 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
/* This function only returns a single iommu in a domain */
594 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
598 /* si_domain and vm domain should not get here. */
599 if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
602 for_each_domain_iommu(iommu_id, domain)
605 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
608 return g_iommus[iommu_id];
611 static void domain_update_iommu_coherency(struct dmar_domain *domain)
613 struct dmar_drhd_unit *drhd;
614 struct intel_iommu *iommu;
618 domain->iommu_coherency = 1;
620 for_each_domain_iommu(i, domain) {
622 if (!ecap_coherent(g_iommus[i]->ecap)) {
623 domain->iommu_coherency = 0;
630 /* No hardware attached; use lowest common denominator */
632 for_each_active_iommu(iommu, drhd) {
633 if (!ecap_coherent(iommu->ecap)) {
634 domain->iommu_coherency = 0;
641 static int domain_update_iommu_snooping(struct intel_iommu *skip)
643 struct dmar_drhd_unit *drhd;
644 struct intel_iommu *iommu;
648 for_each_active_iommu(iommu, drhd) {
650 if (!ecap_sc_support(iommu->ecap)) {
661 static int domain_update_iommu_superpage(struct intel_iommu *skip)
663 struct dmar_drhd_unit *drhd;
664 struct intel_iommu *iommu;
667 if (!intel_iommu_superpage) {
671 /* set iommu_superpage to the smallest common denominator */
673 for_each_active_iommu(iommu, drhd) {
675 mask &= cap_super_page_val(iommu->cap);
685 /* Some capabilities may be different across iommus */
686 static void domain_update_iommu_cap(struct dmar_domain *domain)
688 domain_update_iommu_coherency(domain);
689 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
690 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
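/*
 * Return the context entry for (bus, devfn), allocating the context table
 * on demand when @alloc is set. In scalable mode the root entry is split
 * into a lower and an upper half, each covering 128 devfns.
 */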
693 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
696 struct root_entry *root = &iommu->root_entry[bus];
697 struct context_entry *context;
701 if (sm_supported(iommu)) {
709 context = phys_to_virt(*entry & VTD_PAGE_MASK);
711 unsigned long phy_addr;
715 context = alloc_pgtable_page(iommu->node);
719 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
720 phy_addr = virt_to_phys((void *)context);
721 *entry = phy_addr | 1;
722 __iommu_flush_cache(iommu, entry, sizeof(*entry));
724 return &context[devfn];
727 static int iommu_dummy(struct device *dev)
729 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
733 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
734 * sub-hierarchy of a candidate PCI-PCI bridge
735 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
736 * @bridge: the candidate PCI-PCI bridge
738 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
741 is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
743 struct pci_dev *pdev, *pbridge;
745 if (!dev_is_pci(dev) || !dev_is_pci(bridge))
748 pdev = to_pci_dev(dev);
749 pbridge = to_pci_dev(bridge);
751 if (pbridge->subordinate &&
752 pbridge->subordinate->number <= pdev->bus->number &&
753 pbridge->subordinate->busn_res.end >= pdev->bus->number)
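/*
 * Find the IOMMU that covers @dev by walking the DRHD device scope tables
 * (looking up the PF for SR-IOV VFs and the ACPI companion for non-PCI
 * devices), and fill *bus/*devfn with the identifiers to use in its
 * context entries.
 */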
759 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
761 struct dmar_drhd_unit *drhd = NULL;
762 struct intel_iommu *iommu;
764 struct pci_dev *pdev = NULL;
768 if (iommu_dummy(dev))
771 if (dev_is_pci(dev)) {
772 struct pci_dev *pf_pdev;
774 pdev = to_pci_dev(dev);
777 /* VMD child devices currently cannot be handled individually */
778 if (is_vmd(pdev->bus))
782 /* VFs aren't listed in scope tables; we need to look up
783 * the PF instead to find the IOMMU. */
784 pf_pdev = pci_physfn(pdev);
786 segment = pci_domain_nr(pdev->bus);
787 } else if (has_acpi_companion(dev))
788 dev = &ACPI_COMPANION(dev)->dev;
791 for_each_active_iommu(iommu, drhd) {
792 if (pdev && segment != drhd->segment)
795 for_each_active_dev_scope(drhd->devices,
796 drhd->devices_cnt, i, tmp) {
798 /* For a VF use its original BDF# not that of the PF
799 * which we used for the IOMMU lookup. Strictly speaking
800 * we could do this for all PCI devices; we only need to
801 * get the BDF# from the scope table for ACPI matches. */
802 if (pdev && pdev->is_virtfn)
805 *bus = drhd->devices[i].bus;
806 *devfn = drhd->devices[i].devfn;
810 if (is_downstream_to_pci_bridge(dev, tmp))
814 if (pdev && drhd->include_all) {
816 *bus = pdev->bus->number;
817 *devfn = pdev->devfn;
828 static void domain_flush_cache(struct dmar_domain *domain,
829 void *addr, int size)
831 if (!domain->iommu_coherency)
832 clflush_cache_range(addr, size);
835 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
837 struct context_entry *context;
841 spin_lock_irqsave(&iommu->lock, flags);
842 context = iommu_context_addr(iommu, bus, devfn, 0);
844 ret = context_present(context);
845 spin_unlock_irqrestore(&iommu->lock, flags);
849 static void free_context_table(struct intel_iommu *iommu)
853 struct context_entry *context;
855 spin_lock_irqsave(&iommu->lock, flags);
856 if (!iommu->root_entry) {
859 for (i = 0; i < ROOT_ENTRY_NR; i++) {
860 context = iommu_context_addr(iommu, i, 0, 0);
862 free_pgtable_page(context);
864 if (!sm_supported(iommu))
867 context = iommu_context_addr(iommu, i, 0x80, 0);
869 free_pgtable_page(context);
872 free_pgtable_page(iommu->root_entry);
873 iommu->root_entry = NULL;
875 spin_unlock_irqrestore(&iommu->lock, flags);
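/*
 * Walk the page table of @domain down towards @pfn, allocating intermediate
 * page-table pages as needed, and return the PTE at *target_level (updated
 * to the level actually reached). Returns NULL if @pfn is beyond the
 * domain's address width or an allocation fails.
 */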
878 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
879 unsigned long pfn, int *target_level)
881 struct dma_pte *parent, *pte;
882 int level = agaw_to_level(domain->agaw);
885 BUG_ON(!domain->pgd);
887 if (!domain_pfn_supported(domain, pfn))
888 /* Address beyond IOMMU's addressing capabilities. */
891 parent = domain->pgd;
896 offset = pfn_level_offset(pfn, level);
897 pte = &parent[offset];
898 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
900 if (level == *target_level)
903 if (!dma_pte_present(pte)) {
906 tmp_page = alloc_pgtable_page(domain->nid);
911 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
912 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
913 if (cmpxchg64(&pte->val, 0ULL, pteval))
914 /* Someone else set it while we were thinking; use theirs. */
915 free_pgtable_page(tmp_page);
917 domain_flush_cache(domain, pte, sizeof(*pte));
922 parent = phys_to_virt(dma_pte_addr(pte));
927 *target_level = level;
/* return the pte for an address at a specific level */
934 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
936 int level, int *large_page)
938 struct dma_pte *parent, *pte;
939 int total = agaw_to_level(domain->agaw);
942 parent = domain->pgd;
943 while (level <= total) {
944 offset = pfn_level_offset(pfn, total);
945 pte = &parent[offset];
949 if (!dma_pte_present(pte)) {
954 if (dma_pte_superpage(pte)) {
959 parent = phys_to_virt(dma_pte_addr(pte));
/* clear last level pte, a tlb flush should follow */
966 static void dma_pte_clear_range(struct dmar_domain *domain,
967 unsigned long start_pfn,
968 unsigned long last_pfn)
970 unsigned int large_page;
971 struct dma_pte *first_pte, *pte;
973 BUG_ON(!domain_pfn_supported(domain, start_pfn));
974 BUG_ON(!domain_pfn_supported(domain, last_pfn));
975 BUG_ON(start_pfn > last_pfn);
977 /* we don't need lock here; nobody else touches the iova range */
980 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
982 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
987 start_pfn += lvl_to_nr_pages(large_page);
989 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
991 domain_flush_cache(domain, first_pte,
992 (void *)pte - (void *)first_pte);
994 } while (start_pfn && start_pfn <= last_pfn);
997 static void dma_pte_free_level(struct dmar_domain *domain, int level,
998 int retain_level, struct dma_pte *pte,
999 unsigned long pfn, unsigned long start_pfn,
1000 unsigned long last_pfn)
1002 pfn = max(start_pfn, pfn);
1003 pte = &pte[pfn_level_offset(pfn, level)];
1006 unsigned long level_pfn;
1007 struct dma_pte *level_pte;
1009 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1012 level_pfn = pfn & level_mask(level);
1013 level_pte = phys_to_virt(dma_pte_addr(pte));
1016 dma_pte_free_level(domain, level - 1, retain_level,
1017 level_pte, level_pfn, start_pfn,
1022 * Free the page table if we're below the level we want to
1023 * retain and the range covers the entire table.
1025 if (level < retain_level && !(start_pfn > level_pfn ||
1026 last_pfn < level_pfn + level_size(level) - 1)) {
1028 domain_flush_cache(domain, pte, sizeof(*pte));
1029 free_pgtable_page(level_pte);
1032 pfn += level_size(level);
1033 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1037 * clear last level (leaf) ptes and free page table pages below the
1038 * level we wish to keep intact.
1040 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1041 unsigned long start_pfn,
1042 unsigned long last_pfn,
1045 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1046 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1047 BUG_ON(start_pfn > last_pfn);
1049 dma_pte_clear_range(domain, start_pfn, last_pfn);
1051 /* We don't need lock here; nobody else touches the iova range */
1052 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1053 domain->pgd, 0, start_pfn, last_pfn);
1056 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1057 free_pgtable_page(domain->pgd);
1062 /* When a page at a given level is being unlinked from its parent, we don't
1063 need to *modify* it at all. All we need to do is make a list of all the
1064 pages which can be freed just as soon as we've flushed the IOTLB and we
1065 know the hardware page-walk will no longer touch them.
The 'pte' argument is the *parent* PTE, pointing to the page that is to
be freed. */
1068 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1069 int level, struct dma_pte *pte,
1070 struct page *freelist)
1074 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1075 pg->freelist = freelist;
1081 pte = page_address(pg);
1083 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1084 freelist = dma_pte_list_pagetables(domain, level - 1,
1087 } while (!first_pte_in_page(pte));
1092 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1093 struct dma_pte *pte, unsigned long pfn,
1094 unsigned long start_pfn,
1095 unsigned long last_pfn,
1096 struct page *freelist)
1098 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1100 pfn = max(start_pfn, pfn);
1101 pte = &pte[pfn_level_offset(pfn, level)];
1104 unsigned long level_pfn;
1106 if (!dma_pte_present(pte))
1109 level_pfn = pfn & level_mask(level);
1111 /* If range covers entire pagetable, free it */
1112 if (start_pfn <= level_pfn &&
1113 last_pfn >= level_pfn + level_size(level) - 1) {
/* These subordinate page tables are going away entirely. Don't
1115 bother to clear them; we're just going to *free* them. */
1116 if (level > 1 && !dma_pte_superpage(pte))
1117 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1123 } else if (level > 1) {
1124 /* Recurse down into a level that isn't *entirely* obsolete */
1125 freelist = dma_pte_clear_level(domain, level - 1,
1126 phys_to_virt(dma_pte_addr(pte)),
1127 level_pfn, start_pfn, last_pfn,
1131 pfn += level_size(level);
1132 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1135 domain_flush_cache(domain, first_pte,
1136 (void *)++last_pte - (void *)first_pte);
1141 /* We can't just free the pages because the IOMMU may still be walking
1142 the page tables, and may have cached the intermediate levels. The
1143 pages can only be freed after the IOTLB flush has been done. */
1144 static struct page *domain_unmap(struct dmar_domain *domain,
1145 unsigned long start_pfn,
1146 unsigned long last_pfn)
1148 struct page *freelist;
1150 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1151 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1152 BUG_ON(start_pfn > last_pfn);
1154 /* we don't need lock here; nobody else touches the iova range */
1155 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1156 domain->pgd, 0, start_pfn, last_pfn, NULL);
1159 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1160 struct page *pgd_page = virt_to_page(domain->pgd);
1161 pgd_page->freelist = freelist;
1162 freelist = pgd_page;
1170 static void dma_free_pagelist(struct page *freelist)
1174 while ((pg = freelist)) {
1175 freelist = pg->freelist;
1176 free_pgtable_page(page_address(pg));
1180 static void iova_entry_free(unsigned long data)
1182 struct page *freelist = (struct page *)data;
1184 dma_free_pagelist(freelist);
1187 /* iommu handling */
1188 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1190 struct root_entry *root;
1191 unsigned long flags;
1193 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1195 pr_err("Allocating root entry for %s failed\n",
1200 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1202 spin_lock_irqsave(&iommu->lock, flags);
1203 iommu->root_entry = root;
1204 spin_unlock_irqrestore(&iommu->lock, flags);
1209 static void iommu_set_root_entry(struct intel_iommu *iommu)
1215 addr = virt_to_phys(iommu->root_entry);
1216 if (sm_supported(iommu))
1217 addr |= DMA_RTADDR_SMT;
1219 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1220 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1222 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
1225 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1226 readl, (sts & DMA_GSTS_RTPS), sts);
1228 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1231 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1236 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1239 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1240 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
1243 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1244 readl, (!(val & DMA_GSTS_WBFS)), val);
1246 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* The return value determines if we need a write buffer flush */
1250 static void __iommu_flush_context(struct intel_iommu *iommu,
1251 u16 did, u16 source_id, u8 function_mask,
1258 case DMA_CCMD_GLOBAL_INVL:
1259 val = DMA_CCMD_GLOBAL_INVL;
1261 case DMA_CCMD_DOMAIN_INVL:
1262 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1264 case DMA_CCMD_DEVICE_INVL:
1265 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1266 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1271 val |= DMA_CCMD_ICC;
1273 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1274 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
/* Make sure hardware completes it */
1277 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1278 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1280 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* The return value determines if we need a write buffer flush */
1284 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1285 u64 addr, unsigned int size_order, u64 type)
1287 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1288 u64 val = 0, val_iva = 0;
1292 case DMA_TLB_GLOBAL_FLUSH:
/* global flush doesn't need to set IVA_REG */
1294 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1296 case DMA_TLB_DSI_FLUSH:
1297 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1299 case DMA_TLB_PSI_FLUSH:
1300 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1301 /* IH bit is passed in as part of address */
1302 val_iva = size_order | addr;
1307 /* Note: set drain read/write */
* This is probably only meant to be extra safe; it looks like we can
* ignore it without any impact.
1313 if (cap_read_drain(iommu->cap))
1314 val |= DMA_TLB_READ_DRAIN;
1316 if (cap_write_drain(iommu->cap))
1317 val |= DMA_TLB_WRITE_DRAIN;
1319 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1320 /* Note: Only uses first TLB reg currently */
1322 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1323 dmar_writeq(iommu->reg + tlb_offset + 8, val);
/* Make sure hardware completes it */
1326 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1327 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1329 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1331 /* check IOTLB invalidation granularity */
1332 if (DMA_TLB_IAIG(val) == 0)
1333 pr_err("Flush IOTLB failed\n");
1334 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1335 pr_debug("TLB flush request %Lx, actual %Lx\n",
1336 (unsigned long long)DMA_TLB_IIRG(type),
1337 (unsigned long long)DMA_TLB_IAIG(val));
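/*
 * Look up the device_domain_info for (bus, devfn) on @iommu within @domain,
 * returning it only if the device supports ATS, so the caller can decide
 * whether to enable the Device-TLB for it.
 */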
1340 static struct device_domain_info *
1341 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1344 struct device_domain_info *info;
1346 assert_spin_locked(&device_domain_lock);
1351 list_for_each_entry(info, &domain->devices, link)
1352 if (info->iommu == iommu && info->bus == bus &&
1353 info->devfn == devfn) {
1354 if (info->ats_supported && info->dev)
1362 static void domain_update_iotlb(struct dmar_domain *domain)
1364 struct device_domain_info *info;
1365 bool has_iotlb_device = false;
1367 assert_spin_locked(&device_domain_lock);
1369 list_for_each_entry(info, &domain->devices, link) {
1370 struct pci_dev *pdev;
1372 if (!info->dev || !dev_is_pci(info->dev))
1375 pdev = to_pci_dev(info->dev);
1376 if (pdev->ats_enabled) {
1377 has_iotlb_device = true;
1382 domain->has_iotlb_device = has_iotlb_device;
1385 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1387 struct pci_dev *pdev;
1389 assert_spin_locked(&device_domain_lock);
1391 if (!info || !dev_is_pci(info->dev))
1394 pdev = to_pci_dev(info->dev);
/* For an IOMMU that supports device IOTLB throttling (DIT), we assign the
1396 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
1397 * queue depth at PF level. If DIT is not set, PFSID will be treated as
1398 * reserved, which should be set to 0.
1400 if (!ecap_dit(info->iommu->ecap))
1403 struct pci_dev *pf_pdev;
1405 /* pdev will be returned if device is not a vf */
1406 pf_pdev = pci_physfn(pdev);
1407 info->pfsid = pci_dev_id(pf_pdev);
1410 #ifdef CONFIG_INTEL_IOMMU_SVM
1411 /* The PCIe spec, in its wisdom, declares that the behaviour of
1412 the device if you enable PASID support after ATS support is
1413 undefined. So always enable PASID support on devices which
have it, even if we can't yet know if we're ever going to
use it. */
1416 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1417 info->pasid_enabled = 1;
1419 if (info->pri_supported &&
1420 (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
1421 !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1422 info->pri_enabled = 1;
1424 if (!pdev->untrusted && info->ats_supported &&
1425 pci_ats_page_aligned(pdev) &&
1426 !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1427 info->ats_enabled = 1;
1428 domain_update_iotlb(info->domain);
1429 info->ats_qdep = pci_ats_queue_depth(pdev);
1433 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1435 struct pci_dev *pdev;
1437 assert_spin_locked(&device_domain_lock);
1439 if (!dev_is_pci(info->dev))
1442 pdev = to_pci_dev(info->dev);
1444 if (info->ats_enabled) {
1445 pci_disable_ats(pdev);
1446 info->ats_enabled = 0;
1447 domain_update_iotlb(info->domain);
1449 #ifdef CONFIG_INTEL_IOMMU_SVM
1450 if (info->pri_enabled) {
1451 pci_disable_pri(pdev);
1452 info->pri_enabled = 0;
1454 if (info->pasid_enabled) {
1455 pci_disable_pasid(pdev);
1456 info->pasid_enabled = 0;
1461 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1462 u64 addr, unsigned mask)
1465 unsigned long flags;
1466 struct device_domain_info *info;
1468 if (!domain->has_iotlb_device)
1471 spin_lock_irqsave(&device_domain_lock, flags);
1472 list_for_each_entry(info, &domain->devices, link) {
1473 if (!info->ats_enabled)
1476 sid = info->bus << 8 | info->devfn;
1477 qdep = info->ats_qdep;
1478 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1481 spin_unlock_irqrestore(&device_domain_lock, flags);
1484 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1485 struct dmar_domain *domain,
1486 unsigned long pfn, unsigned int pages,
1489 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1490 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1491 u16 did = domain->iommu_did[iommu->seq_id];
* Fallback to domain-selective flush if there is no PSI support or the
* size is too big.
* PSI requires the page size to be 2 ^ x, and the base address to be
* naturally aligned to the size.
1503 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1504 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1507 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1511 * In caching mode, changes of pages from non-present to present require
1512 * flush. However, device IOTLB doesn't need to be flushed in this case.
1514 if (!cap_caching_mode(iommu->cap) || !map)
1515 iommu_flush_dev_iotlb(domain, addr, mask);
1518 /* Notification for newly created mappings */
1519 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1520 struct dmar_domain *domain,
1521 unsigned long pfn, unsigned int pages)
1523 /* It's a non-present to present mapping. Only flush if caching mode */
1524 if (cap_caching_mode(iommu->cap))
1525 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1527 iommu_flush_write_buffer(iommu);
1530 static void iommu_flush_iova(struct iova_domain *iovad)
1532 struct dmar_domain *domain;
1535 domain = container_of(iovad, struct dmar_domain, iovad);
1537 for_each_domain_iommu(idx, domain) {
1538 struct intel_iommu *iommu = g_iommus[idx];
1539 u16 did = domain->iommu_did[iommu->seq_id];
1541 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1543 if (!cap_caching_mode(iommu->cap))
1544 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1545 0, MAX_AGAW_PFN_WIDTH);
1549 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1552 unsigned long flags;
1554 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1557 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1558 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1559 pmen &= ~DMA_PMEN_EPM;
1560 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1562 /* wait for the protected region status bit to clear */
1563 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1564 readl, !(pmen & DMA_PMEN_PRS), pmen);
1566 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1569 static void iommu_enable_translation(struct intel_iommu *iommu)
1572 unsigned long flags;
1574 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1575 iommu->gcmd |= DMA_GCMD_TE;
1576 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
1579 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1580 readl, (sts & DMA_GSTS_TES), sts);
1582 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1585 static void iommu_disable_translation(struct intel_iommu *iommu)
1590 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1591 iommu->gcmd &= ~DMA_GCMD_TE;
1592 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
1595 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1596 readl, (!(sts & DMA_GSTS_TES)), sts);
1598 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
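/*
 * Allocate the per-IOMMU domain-id bitmap and the two-level domains[]
 * lookup table, and reserve the domain-ids that must never be handed out
 * to ordinary domains (see the comments below).
 */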
1602 static int iommu_init_domains(struct intel_iommu *iommu)
1604 u32 ndomains, nlongs;
1607 ndomains = cap_ndoms(iommu->cap);
1608 pr_debug("%s: Number of Domains supported <%d>\n",
1609 iommu->name, ndomains);
1610 nlongs = BITS_TO_LONGS(ndomains);
1612 spin_lock_init(&iommu->lock);
1614 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1615 if (!iommu->domain_ids) {
1616 pr_err("%s: Allocating domain id array failed\n",
1621 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1622 iommu->domains = kzalloc(size, GFP_KERNEL);
1624 if (iommu->domains) {
1625 size = 256 * sizeof(struct dmar_domain *);
1626 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1629 if (!iommu->domains || !iommu->domains[0]) {
1630 pr_err("%s: Allocating domain array failed\n",
1632 kfree(iommu->domain_ids);
1633 kfree(iommu->domains);
1634 iommu->domain_ids = NULL;
1635 iommu->domains = NULL;
1642 * If Caching mode is set, then invalid translations are tagged
1643 * with domain-id 0, hence we need to pre-allocate it. We also
1644 * use domain-id 0 as a marker for non-allocated domain-id, so
1645 * make sure it is not used for a real domain.
1647 set_bit(0, iommu->domain_ids);
1650 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
1651 * entry for first-level or pass-through translation modes should
1652 * be programmed with a domain id different from those used for
* second-level or nested translation. We reserve a domain id for
* this purpose.
1656 if (sm_supported(iommu))
1657 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
1662 static void disable_dmar_iommu(struct intel_iommu *iommu)
1664 struct device_domain_info *info, *tmp;
1665 unsigned long flags;
1667 if (!iommu->domains || !iommu->domain_ids)
1670 spin_lock_irqsave(&device_domain_lock, flags);
1671 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1672 if (info->iommu != iommu)
1675 if (!info->dev || !info->domain)
1678 __dmar_remove_one_dev_info(info);
1680 spin_unlock_irqrestore(&device_domain_lock, flags);
1682 if (iommu->gcmd & DMA_GCMD_TE)
1683 iommu_disable_translation(iommu);
1686 static void free_dmar_iommu(struct intel_iommu *iommu)
1688 if ((iommu->domains) && (iommu->domain_ids)) {
1689 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1692 for (i = 0; i < elems; i++)
1693 kfree(iommu->domains[i]);
1694 kfree(iommu->domains);
1695 kfree(iommu->domain_ids);
1696 iommu->domains = NULL;
1697 iommu->domain_ids = NULL;
1700 g_iommus[iommu->seq_id] = NULL;
1702 /* free context mapping */
1703 free_context_table(iommu);
1705 #ifdef CONFIG_INTEL_IOMMU_SVM
1706 if (pasid_supported(iommu)) {
1707 if (ecap_prs(iommu->ecap))
1708 intel_svm_finish_prq(iommu);
1713 static struct dmar_domain *alloc_domain(int flags)
1715 struct dmar_domain *domain;
1717 domain = alloc_domain_mem();
1721 memset(domain, 0, sizeof(*domain));
1722 domain->nid = NUMA_NO_NODE;
1723 domain->flags = flags;
1724 domain->has_iotlb_device = false;
1725 INIT_LIST_HEAD(&domain->devices);
1730 /* Must be called with iommu->lock */
1731 static int domain_attach_iommu(struct dmar_domain *domain,
1732 struct intel_iommu *iommu)
1734 unsigned long ndomains;
1737 assert_spin_locked(&device_domain_lock);
1738 assert_spin_locked(&iommu->lock);
1740 domain->iommu_refcnt[iommu->seq_id] += 1;
1741 domain->iommu_count += 1;
1742 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1743 ndomains = cap_ndoms(iommu->cap);
1744 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1746 if (num >= ndomains) {
1747 pr_err("%s: No free domain ids\n", iommu->name);
1748 domain->iommu_refcnt[iommu->seq_id] -= 1;
1749 domain->iommu_count -= 1;
1753 set_bit(num, iommu->domain_ids);
1754 set_iommu_domain(iommu, num, domain);
1756 domain->iommu_did[iommu->seq_id] = num;
1757 domain->nid = iommu->node;
1759 domain_update_iommu_cap(domain);
1765 static int domain_detach_iommu(struct dmar_domain *domain,
1766 struct intel_iommu *iommu)
1770 assert_spin_locked(&device_domain_lock);
1771 assert_spin_locked(&iommu->lock);
1773 domain->iommu_refcnt[iommu->seq_id] -= 1;
1774 count = --domain->iommu_count;
1775 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1776 num = domain->iommu_did[iommu->seq_id];
1777 clear_bit(num, iommu->domain_ids);
1778 set_iommu_domain(iommu, num, NULL);
1780 domain_update_iommu_cap(domain);
1781 domain->iommu_did[iommu->seq_id] = 0;
1787 static struct iova_domain reserved_iova_list;
1788 static struct lock_class_key reserved_rbtree_key;
1790 static int dmar_init_reserved_ranges(void)
1792 struct pci_dev *pdev = NULL;
1796 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
1798 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1799 &reserved_rbtree_key);
1801 /* IOAPIC ranges shouldn't be accessed by DMA */
1802 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1803 IOVA_PFN(IOAPIC_RANGE_END));
1805 pr_err("Reserve IOAPIC range failed\n");
1809 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1810 for_each_pci_dev(pdev) {
1813 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1814 r = &pdev->resource[i];
1815 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1817 iova = reserve_iova(&reserved_iova_list,
1821 pci_err(pdev, "Reserve iova for %pR failed\n", r);
1829 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1831 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
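/*
 * Round the guest address width up so that (width - 12) is a whole number
 * of 9-bit page-table levels, e.g. a gaw of 48 stays 48 (4 levels) while a
 * gaw of 50 is adjusted to 57 (5 levels).
 */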
1834 static inline int guestwidth_to_adjustwidth(int gaw)
1837 int r = (gaw - 12) % 9;
1848 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1851 int adjust_width, agaw;
1852 unsigned long sagaw;
1855 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
1857 err = init_iova_flush_queue(&domain->iovad,
1858 iommu_flush_iova, iova_entry_free);
1862 domain_reserve_special_ranges(domain);
1864 /* calculate AGAW */
1865 if (guest_width > cap_mgaw(iommu->cap))
1866 guest_width = cap_mgaw(iommu->cap);
1867 domain->gaw = guest_width;
1868 adjust_width = guestwidth_to_adjustwidth(guest_width);
1869 agaw = width_to_agaw(adjust_width);
1870 sagaw = cap_sagaw(iommu->cap);
1871 if (!test_bit(agaw, &sagaw)) {
1872 /* hardware doesn't support it, choose a bigger one */
1873 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1874 agaw = find_next_bit(&sagaw, 5, agaw);
1878 domain->agaw = agaw;
1880 if (ecap_coherent(iommu->ecap))
1881 domain->iommu_coherency = 1;
1883 domain->iommu_coherency = 0;
1885 if (ecap_sc_support(iommu->ecap))
1886 domain->iommu_snooping = 1;
1888 domain->iommu_snooping = 0;
1890 if (intel_iommu_superpage)
1891 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1893 domain->iommu_superpage = 0;
1895 domain->nid = iommu->node;
1897 /* always allocate the top pgd */
1898 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1901 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1905 static void domain_exit(struct dmar_domain *domain)
1907 struct page *freelist;
1909 /* Remove associated devices and clear attached or cached domains */
1910 domain_remove_dev_info(domain);
1913 put_iova_domain(&domain->iovad);
1915 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1917 dma_free_pagelist(freelist);
1919 free_domain_mem(domain);
1923 * Get the PASID directory size for scalable mode context entry.
1924 * Value of X in the PDTS field of a scalable mode context entry
* indicates a PASID directory with 2^(X + 7) entries.
1927 static inline unsigned long context_get_sm_pds(struct pasid_table *table)
1931 max_pde = table->max_pasid >> PASID_PDE_SHIFT;
1932 pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
1940 * Set the RID_PASID field of a scalable mode context entry. The
1941 * IOMMU hardware will use the PASID value set in this field for
1942 * DMA translations of DMA requests without PASID.
1945 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
1947 context->hi |= pasid & ((1 << 20) - 1);
1948 context->hi |= (1 << 20);
* Set the DTE(Device-TLB Enable) field of a scalable mode context
* entry.
1955 static inline void context_set_sm_dte(struct context_entry *context)
1957 context->lo |= (1 << 2);
* Set the PRE(Page Request Enable) field of a scalable mode context
* entry.
1964 static inline void context_set_sm_pre(struct context_entry *context)
1966 context->lo |= (1 << 4);
1969 /* Convert value to context PASID directory size field coding. */
1970 #define context_pdts(pds) (((pds) & 0x7) << 9)
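/*
 * Install the context entry for (bus, devfn): in scalable mode it points at
 * the device's PASID directory, in legacy mode at @domain's second-level
 * page table (or marks the device as pass-through), then flush the context
 * and IOTLB caches as the hardware requires.
 */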
1972 static int domain_context_mapping_one(struct dmar_domain *domain,
1973 struct intel_iommu *iommu,
1974 struct pasid_table *table,
1977 u16 did = domain->iommu_did[iommu->seq_id];
1978 int translation = CONTEXT_TT_MULTI_LEVEL;
1979 struct device_domain_info *info = NULL;
1980 struct context_entry *context;
1981 unsigned long flags;
1986 if (hw_pass_through && domain_type_is_si(domain))
1987 translation = CONTEXT_TT_PASS_THROUGH;
1989 pr_debug("Set context mapping for %02x:%02x.%d\n",
1990 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1992 BUG_ON(!domain->pgd);
1994 spin_lock_irqsave(&device_domain_lock, flags);
1995 spin_lock(&iommu->lock);
1998 context = iommu_context_addr(iommu, bus, devfn, 1);
2003 if (context_present(context))
2007 * For kdump cases, old valid entries may be cached due to the
2008 * in-flight DMA and copied pgtable, but there is no unmapping
2009 * behaviour for them, thus we need an explicit cache flush for
2010 * the newly-mapped device. For kdump, at this point, the device
2011 * is supposed to finish reset at its driver probe stage, so no
* in-flight DMA will exist, and we don't need to worry about it
* hereafter.
2015 if (context_copied(context)) {
2016 u16 did_old = context_domain_id(context);
2018 if (did_old < cap_ndoms(iommu->cap)) {
2019 iommu->flush.flush_context(iommu, did_old,
2020 (((u16)bus) << 8) | devfn,
2021 DMA_CCMD_MASK_NOBIT,
2022 DMA_CCMD_DEVICE_INVL);
2023 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2028 context_clear_entry(context);
2030 if (sm_supported(iommu)) {
2035 /* Setup the PASID DIR pointer: */
2036 pds = context_get_sm_pds(table);
2037 context->lo = (u64)virt_to_phys(table->table) |
2040 /* Setup the RID_PASID field: */
2041 context_set_sm_rid2pasid(context, PASID_RID2PASID);
* Setup the Device-TLB enable bit and Page request
* Enable bit:
2047 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2048 if (info && info->ats_supported)
2049 context_set_sm_dte(context);
2050 if (info && info->pri_supported)
2051 context_set_sm_pre(context);
2053 struct dma_pte *pgd = domain->pgd;
2056 context_set_domain_id(context, did);
2058 if (translation != CONTEXT_TT_PASS_THROUGH) {
* Skip top levels of page tables for an iommu which has
* less agaw than the default. Unnecessary for PT mode.
2063 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2065 pgd = phys_to_virt(dma_pte_addr(pgd));
2066 if (!dma_pte_present(pgd))
2070 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2071 if (info && info->ats_supported)
2072 translation = CONTEXT_TT_DEV_IOTLB;
2074 translation = CONTEXT_TT_MULTI_LEVEL;
2076 context_set_address_root(context, virt_to_phys(pgd));
2077 context_set_address_width(context, agaw);
2080 * In pass through mode, AW must be programmed to
2081 * indicate the largest AGAW value supported by
2082 * hardware. And ASR is ignored by hardware.
2084 context_set_address_width(context, iommu->msagaw);
2087 context_set_translation_type(context, translation);
2090 context_set_fault_enable(context);
2091 context_set_present(context);
2092 domain_flush_cache(domain, context, sizeof(*context));
2095 * It's a non-present to present mapping. If hardware doesn't cache
* non-present entries we only need to flush the write-buffer. If it
* _does_ cache non-present entries, then it does so in the special
2098 * domain #0, which we have to flush:
2100 if (cap_caching_mode(iommu->cap)) {
2101 iommu->flush.flush_context(iommu, 0,
2102 (((u16)bus) << 8) | devfn,
2103 DMA_CCMD_MASK_NOBIT,
2104 DMA_CCMD_DEVICE_INVL);
2105 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2107 iommu_flush_write_buffer(iommu);
2109 iommu_enable_dev_iotlb(info);
2114 spin_unlock(&iommu->lock);
2115 spin_unlock_irqrestore(&device_domain_lock, flags);
2120 struct domain_context_mapping_data {
2121 struct dmar_domain *domain;
2122 struct intel_iommu *iommu;
2123 struct pasid_table *table;
2126 static int domain_context_mapping_cb(struct pci_dev *pdev,
2127 u16 alias, void *opaque)
2129 struct domain_context_mapping_data *data = opaque;
2131 return domain_context_mapping_one(data->domain, data->iommu,
2132 data->table, PCI_BUS_NUM(alias),
2137 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2139 struct domain_context_mapping_data data;
2140 struct pasid_table *table;
2141 struct intel_iommu *iommu;
2144 iommu = device_to_iommu(dev, &bus, &devfn);
2148 table = intel_pasid_get_table(dev);
2150 if (!dev_is_pci(dev))
2151 return domain_context_mapping_one(domain, iommu, table,
2154 data.domain = domain;
2158 return pci_for_each_dma_alias(to_pci_dev(dev),
2159 &domain_context_mapping_cb, &data);
2162 static int domain_context_mapped_cb(struct pci_dev *pdev,
2163 u16 alias, void *opaque)
2165 struct intel_iommu *iommu = opaque;
2167 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2170 static int domain_context_mapped(struct device *dev)
2172 struct intel_iommu *iommu;
2175 iommu = device_to_iommu(dev, &bus, &devfn);
2179 if (!dev_is_pci(dev))
2180 return device_context_mapped(iommu, bus, devfn);
2182 return !pci_for_each_dma_alias(to_pci_dev(dev),
2183 domain_context_mapped_cb, iommu);
2186 /* Returns a number of VTD pages, but aligned to MM page size */
2187 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2190 host_addr &= ~PAGE_MASK;
2191 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2194 /* Return largest possible superpage level for a given mapping */
2195 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2196 unsigned long iov_pfn,
2197 unsigned long phy_pfn,
2198 unsigned long pages)
2200 int support, level = 1;
2201 unsigned long pfnmerge;
2203 support = domain->iommu_superpage;
2205 /* To use a large page, the virtual *and* physical addresses
2206 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2207 of them will mean we have to use smaller pages. So just
2208 merge them and check both at once. */
2209 pfnmerge = iov_pfn | phy_pfn;
2211 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2212 pages >>= VTD_STRIDE_SHIFT;
2215 pfnmerge >>= VTD_STRIDE_SHIFT;
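/*
 * Fill in the leaf PTEs mapping @nr_pages pages starting at @iov_pfn,
 * taking the physical addresses either from @sg or from @phys_pfn, and
 * using superpages whenever alignment and remaining length allow.
 */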
2222 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2223 struct scatterlist *sg, unsigned long phys_pfn,
2224 unsigned long nr_pages, int prot)
2226 struct dma_pte *first_pte = NULL, *pte = NULL;
2227 phys_addr_t uninitialized_var(pteval);
2228 unsigned long sg_res = 0;
2229 unsigned int largepage_lvl = 0;
2230 unsigned long lvl_pages = 0;
2232 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2234 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2237 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2241 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2244 while (nr_pages > 0) {
2248 unsigned int pgoff = sg->offset & ~PAGE_MASK;
2250 sg_res = aligned_nrpages(sg->offset, sg->length);
2251 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
2252 sg->dma_length = sg->length;
2253 pteval = (sg_phys(sg) - pgoff) | prot;
2254 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2258 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2260 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
/* It is a large page */
2264 if (largepage_lvl > 1) {
2265 unsigned long nr_superpages, end_pfn;
2267 pteval |= DMA_PTE_LARGE_PAGE;
2268 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2270 nr_superpages = sg_res / lvl_pages;
2271 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2274 * Ensure that old small page tables are
2275 * removed to make room for superpage(s).
2276 * We're adding new large pages, so make sure
2277 * we don't remove their parent tables.
2279 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2282 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2286 /* We don't need lock here, nobody else
2287 * touches the iova range
2289 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2291 static int dumps = 5;
2292 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2293 iov_pfn, tmp, (unsigned long long)pteval);
2296 debug_dma_dump_mappings(NULL);
2301 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2303 BUG_ON(nr_pages < lvl_pages);
2304 BUG_ON(sg_res < lvl_pages);
2306 nr_pages -= lvl_pages;
2307 iov_pfn += lvl_pages;
2308 phys_pfn += lvl_pages;
2309 pteval += lvl_pages * VTD_PAGE_SIZE;
2310 sg_res -= lvl_pages;
2312 /* If the next PTE would be the first in a new page, then we
2313 need to flush the cache on the entries we've just written.
2314 And then we'll need to recalculate 'pte', so clear it and
2315 let it get set again in the if (!pte) block above.
2317 If we're done (!nr_pages) we need to flush the cache too.
2319 Also if we've been setting superpages, we may need to
2320 recalculate 'pte' and switch back to smaller pages for the
2321 end of the mapping, if the trailing size is not enough to
2322 use another superpage (i.e. sg_res < lvl_pages). */
2324 if (!nr_pages || first_pte_in_page(pte) ||
2325 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2326 domain_flush_cache(domain, first_pte,
2327 (void *)pte - (void *)first_pte);
2331 if (!sg_res && nr_pages)
2337 static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2338 struct scatterlist *sg, unsigned long phys_pfn,
2339 unsigned long nr_pages, int prot)
2342 struct intel_iommu *iommu;
2344 /* Do the real mapping first */
2345 ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
2349 for_each_domain_iommu(iommu_id, domain) {
2350 iommu = g_iommus[iommu_id];
2351 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2357 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2358 struct scatterlist *sg, unsigned long nr_pages,
2361 return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2364 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2365 unsigned long phys_pfn, unsigned long nr_pages,
2368 return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
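/*
 * Tear down the context entry for (bus, devfn) and invalidate the context
 * and IOTLB caches for the domain-id it carried.
 */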
2371 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2373 unsigned long flags;
2374 struct context_entry *context;
2380 spin_lock_irqsave(&iommu->lock, flags);
2381 context = iommu_context_addr(iommu, bus, devfn, 0);
2383 spin_unlock_irqrestore(&iommu->lock, flags);
2386 did_old = context_domain_id(context);
2387 context_clear_entry(context);
2388 __iommu_flush_cache(iommu, context, sizeof(*context));
2389 spin_unlock_irqrestore(&iommu->lock, flags);
2390 iommu->flush.flush_context(iommu,
2392 (((u16)bus) << 8) | devfn,
2393 DMA_CCMD_MASK_NOBIT,
2394 DMA_CCMD_DEVICE_INVL);
2395 iommu->flush.flush_iotlb(iommu,
2402 static inline void unlink_domain_info(struct device_domain_info *info)
2404 assert_spin_locked(&device_domain_lock);
2405 list_del(&info->link);
2406 list_del(&info->global);
2408 info->dev->archdata.iommu = NULL;
2411 static void domain_remove_dev_info(struct dmar_domain *domain)
2413 struct device_domain_info *info, *tmp;
2414 unsigned long flags;
2416 spin_lock_irqsave(&device_domain_lock, flags);
2417 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2418 __dmar_remove_one_dev_info(info);
2419 spin_unlock_irqrestore(&device_domain_lock, flags);
2424 * Note: we use struct device->archdata.iommu to store the info
2426 static struct dmar_domain *find_domain(struct device *dev)
2428 struct device_domain_info *info;
2430 if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
2431 struct iommu_domain *domain;
2433 dev->archdata.iommu = NULL;
2434 domain = iommu_get_domain_for_dev(dev);
2436 intel_iommu_attach_device(domain, dev);
2439 /* No lock here, assumes no domain exit in normal case */
2440 info = dev->archdata.iommu;
2443 return info->domain;
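/*
 * Illustrative note (assumption, not driver code): callers typically do
 *
 *	struct dmar_domain *domain = find_domain(dev);
 *
 *	if (!domain)
 *		return DMA_MAPPING_ERROR;
 *
 * The DEFER_DEVICE_DOMAIN_INFO marker above makes the first lookup attach
 * the device to its default IOMMU domain before the cached info is used.
 */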
2447 static inline struct device_domain_info *
2448 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2450 struct device_domain_info *info;
2452 list_for_each_entry(info, &device_domain_list, global)
2453 if (info->iommu->segment == segment && info->bus == bus &&
2454 info->devfn == devfn)
2460 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2463 struct dmar_domain *domain)
2465 struct dmar_domain *found = NULL;
2466 struct device_domain_info *info;
2467 unsigned long flags;
2470 info = alloc_devinfo_mem();
2475 info->devfn = devfn;
2476 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2477 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2480 info->domain = domain;
2481 info->iommu = iommu;
2482 info->pasid_table = NULL;
2483 info->auxd_enabled = 0;
2484 INIT_LIST_HEAD(&info->auxiliary_domains);
2486 if (dev && dev_is_pci(dev)) {
2487 struct pci_dev *pdev = to_pci_dev(info->dev);
2489 if (!pdev->untrusted &&
2490 !pci_ats_disabled() &&
2491 ecap_dev_iotlb_support(iommu->ecap) &&
2492 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2493 dmar_find_matched_atsr_unit(pdev))
2494 info->ats_supported = 1;
2496 if (sm_supported(iommu)) {
2497 if (pasid_supported(iommu)) {
2498 int features = pci_pasid_features(pdev);
2500 info->pasid_supported = features | 1;
2503 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2504 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2505 info->pri_supported = 1;
2509 spin_lock_irqsave(&device_domain_lock, flags);
2511 found = find_domain(dev);
2514 struct device_domain_info *info2;
2515 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2517 found = info2->domain;
2523 spin_unlock_irqrestore(&device_domain_lock, flags);
2524 free_devinfo_mem(info);
2525 /* Caller must free the original domain */
2529 spin_lock(&iommu->lock);
2530 ret = domain_attach_iommu(domain, iommu);
2531 spin_unlock(&iommu->lock);
2534 spin_unlock_irqrestore(&device_domain_lock, flags);
2535 free_devinfo_mem(info);
2539 list_add(&info->link, &domain->devices);
2540 list_add(&info->global, &device_domain_list);
2542 dev->archdata.iommu = info;
2543 spin_unlock_irqrestore(&device_domain_lock, flags);
2545 /* PASID table is mandatory for a PCI device in scalable mode. */
2546 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2547 ret = intel_pasid_alloc_table(dev);
2549 dev_err(dev, "PASID table allocation failed\n");
2550 dmar_remove_one_dev_info(dev);
2554 /* Setup the PASID entry for requests without PASID: */
2555 spin_lock(&iommu->lock);
2556 if (hw_pass_through && domain_type_is_si(domain))
2557 ret = intel_pasid_setup_pass_through(iommu, domain,
2558 dev, PASID_RID2PASID);
2560 ret = intel_pasid_setup_second_level(iommu, domain,
2561 dev, PASID_RID2PASID);
2562 spin_unlock(&iommu->lock);
2564 dev_err(dev, "Setup RID2PASID failed\n");
2565 dmar_remove_one_dev_info(dev);
2570 if (dev && domain_context_mapping(domain, dev)) {
2571 dev_err(dev, "Domain context map failed\n");
2572 dmar_remove_one_dev_info(dev);
2579 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2581 *(u16 *)opaque = alias;
2585 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2587 struct device_domain_info *info;
2588 struct dmar_domain *domain = NULL;
2589 struct intel_iommu *iommu;
2591 unsigned long flags;
2594 iommu = device_to_iommu(dev, &bus, &devfn);
2598 if (dev_is_pci(dev)) {
2599 struct pci_dev *pdev = to_pci_dev(dev);
2601 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2603 spin_lock_irqsave(&device_domain_lock, flags);
2604 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2605 PCI_BUS_NUM(dma_alias),
2608 iommu = info->iommu;
2609 domain = info->domain;
2611 spin_unlock_irqrestore(&device_domain_lock, flags);
2613 /* DMA alias already has a domain, use it */
2618 /* Allocate and initialize new domain for the device */
2619 domain = alloc_domain(0);
2622 if (domain_init(domain, iommu, gaw)) {
2623 domain_exit(domain);
2631 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2632 struct dmar_domain *domain)
2634 struct intel_iommu *iommu;
2635 struct dmar_domain *tmp;
2636 u16 req_id, dma_alias;
2639 iommu = device_to_iommu(dev, &bus, &devfn);
2643 req_id = ((u16)bus << 8) | devfn;
2645 if (dev_is_pci(dev)) {
2646 struct pci_dev *pdev = to_pci_dev(dev);
2648 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2650 /* register PCI DMA alias device */
2651 if (req_id != dma_alias) {
2652 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2653 dma_alias & 0xff, NULL, domain);
2655 if (!tmp || tmp != domain)
2660 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2661 if (!tmp || tmp != domain)
2667 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2669 struct dmar_domain *domain, *tmp;
2671 domain = find_domain(dev);
2675 domain = find_or_alloc_domain(dev, gaw);
2679 tmp = set_domain_for_dev(dev, domain);
2680 if (!tmp || domain != tmp) {
2681 domain_exit(domain);
2690 static int iommu_domain_identity_map(struct dmar_domain *domain,
2691 unsigned long long start,
2692 unsigned long long end)
2694 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2695 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2697 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2698 dma_to_mm_pfn(last_vpfn))) {
2699 pr_err("Reserving iova failed\n");
2703 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2705 * The RMRR range might overlap with a physical memory range,
2708 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2710 return __domain_mapping(domain, first_vpfn, NULL,
2711 first_vpfn, last_vpfn - first_vpfn + 1,
2712 DMA_PTE_READ|DMA_PTE_WRITE);
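/*
 * Illustrative sketch (not part of the driver): mapping a hypothetical RMRR
 * region [0x7f000000, 0x7f0fffff] 1:1 into a domain would look like
 *
 *	ret = iommu_domain_identity_map(domain, 0x7f000000ULL, 0x7f0fffffULL);
 *
 * i.e. IOVA == physical address for the whole range, readable and writable.
 */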
2715 static int domain_prepare_identity_map(struct device *dev,
2716 struct dmar_domain *domain,
2717 unsigned long long start,
2718 unsigned long long end)
2720 /* For _hardware_ passthrough, don't bother. But for software
2721 passthrough, we do it anyway -- it may indicate a memory
2722 range which is reserved in E820 and so didn't get set
2723 up to start with in si_domain */
2724 if (domain == si_domain && hw_pass_through) {
2725 dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
2730 dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
2733 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2734 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2735 dmi_get_system_info(DMI_BIOS_VENDOR),
2736 dmi_get_system_info(DMI_BIOS_VERSION),
2737 dmi_get_system_info(DMI_PRODUCT_VERSION));
2741 if (end >> agaw_to_width(domain->agaw)) {
2742 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2743 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2744 agaw_to_width(domain->agaw),
2745 dmi_get_system_info(DMI_BIOS_VENDOR),
2746 dmi_get_system_info(DMI_BIOS_VERSION),
2747 dmi_get_system_info(DMI_PRODUCT_VERSION));
2751 return iommu_domain_identity_map(domain, start, end);
2754 static int iommu_prepare_identity_map(struct device *dev,
2755 unsigned long long start,
2756 unsigned long long end)
2758 struct dmar_domain *domain;
2761 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2765 ret = domain_prepare_identity_map(dev, domain, start, end);
2767 domain_exit(domain);
2772 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2775 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2777 return iommu_prepare_identity_map(dev, rmrr->base_address,
2781 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2783 static int __init si_domain_init(int hw)
2785 struct dmar_rmrr_unit *rmrr;
2789 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2793 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2794 domain_exit(si_domain);
2801 for_each_online_node(nid) {
2802 unsigned long start_pfn, end_pfn;
2805 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2806 ret = iommu_domain_identity_map(si_domain,
2807 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2814 * Normally we use DMA domains for devices which have RMRRs. But we
2815 * lose this requirement for graphics and USB devices. Identity map
2816 * the RMRRs for graphics and USB devices so that they can use the
2819 for_each_rmrr_units(rmrr) {
2820 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2822 unsigned long long start = rmrr->base_address;
2823 unsigned long long end = rmrr->end_address;
2825 if (device_is_rmrr_locked(dev))
2828 if (WARN_ON(end < start ||
2829 end >> agaw_to_width(si_domain->agaw)))
2832 ret = iommu_domain_identity_map(si_domain, start, end);
2841 static int identity_mapping(struct device *dev)
2843 struct device_domain_info *info;
2845 info = dev->archdata.iommu;
2846 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2847 return (info->domain == si_domain);
2852 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2854 struct dmar_domain *ndomain;
2855 struct intel_iommu *iommu;
2858 iommu = device_to_iommu(dev, &bus, &devfn);
2862 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2863 if (ndomain != domain)
2869 static bool device_has_rmrr(struct device *dev)
2871 struct dmar_rmrr_unit *rmrr;
2876 for_each_rmrr_units(rmrr) {
2878 * Return TRUE if this RMRR contains the device that
2881 for_each_active_dev_scope(rmrr->devices,
2882 rmrr->devices_cnt, i, tmp)
2893 * There are a couple cases where we need to restrict the functionality of
2894 * devices associated with RMRRs. The first is when evaluating a device for
2895 * identity mapping because problems exist when devices are moved in and out
2896 * of domains and their respective RMRR information is lost. This means that
2897 * a device with associated RMRRs will never be in a "passthrough" domain.
2898 * The second is use of the device through the IOMMU API. This interface
2899 * expects to have full control of the IOVA space for the device. We cannot
2900 * satisfy both the requirement that RMRR access is maintained and have an
2901 * unencumbered IOVA space. We also have no ability to quiesce the device's
2902 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2903 * We therefore prevent devices associated with an RMRR from participating in
2904 * the IOMMU API, which eliminates them from device assignment.
2906 * In both cases we assume that PCI USB devices with RMRRs have them largely
2907 * for historical reasons and that the RMRR space is not actively used post
2908 * boot. This exclusion may change if vendors begin to abuse it.
2910 * The same exception is made for graphics devices, with the requirement that
2911 * any use of the RMRR regions will be torn down before assigning the device
2914 static bool device_is_rmrr_locked(struct device *dev)
2916 if (!device_has_rmrr(dev))
2919 if (dev_is_pci(dev)) {
2920 struct pci_dev *pdev = to_pci_dev(dev);
2922 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2930 * Return the required default domain type for a specific device.
2932 * @dev: the device in query
2936 * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
2937 * - IOMMU_DOMAIN_IDENTITY: device requires an identical mapping domain
2938 * - 0: both identity and dynamic domains work for this device
2940 static int device_def_domain_type(struct device *dev)
2942 if (dev_is_pci(dev)) {
2943 struct pci_dev *pdev = to_pci_dev(dev);
2945 if (device_is_rmrr_locked(dev))
2946 return IOMMU_DOMAIN_DMA;
2949 * Prevent any device marked as untrusted from getting
2950 * placed into the statically identity mapping domain.
2952 if (pdev->untrusted)
2953 return IOMMU_DOMAIN_DMA;
2955 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2956 return IOMMU_DOMAIN_IDENTITY;
2958 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2959 return IOMMU_DOMAIN_IDENTITY;
2962 * We want to start off with all devices in the 1:1 domain, and
2963 * take them out later if we find they can't access all of memory.
2965 * However, we can't do this for PCI devices behind bridges,
2966 * because all PCI devices behind the same bridge will end up
2967 * with the same source-id on their transactions.
2969 * Practically speaking, we can't change things around for these
2970 * devices at run-time, because we can't be sure there'll be no
2971 * DMA transactions in flight for any of their siblings.
2973 * So PCI devices (unless they're on the root bus) as well as
2974 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2975 * the 1:1 domain, just in _case_ one of their siblings turns out
2976 * not to be able to map all of memory.
2978 if (!pci_is_pcie(pdev)) {
2979 if (!pci_is_root_bus(pdev->bus))
2980 return IOMMU_DOMAIN_DMA;
2981 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2982 return IOMMU_DOMAIN_DMA;
2983 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2984 return IOMMU_DOMAIN_DMA;
2986 if (device_has_rmrr(dev))
2987 return IOMMU_DOMAIN_DMA;
2990 return (iommu_identity_mapping & IDENTMAP_ALL) ?
2991 IOMMU_DOMAIN_IDENTITY : 0;
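/*
 * Illustrative sketch (assumption, not driver code): a caller could turn the
 * result of device_def_domain_type() into a concrete choice like this.
 *
 *	switch (device_def_domain_type(dev)) {
 *	case IOMMU_DOMAIN_IDENTITY:
 *		// use the static 1:1 si_domain
 *		break;
 *	case IOMMU_DOMAIN_DMA:
 *		// use a dynamically mapped DMA domain
 *		break;
 *	default:
 *		// 0: either type works; fall back to the global default
 *		break;
 *	}
 */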
2994 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2997 * Start from the sane iommu hardware state.
2998 * If the queued invalidation is already initialized by us
2999 * (for example, while enabling interrupt-remapping) then
3000 * we already have things rolling from a sane state.
3004 * Clear any previous faults.
3006 dmar_fault(-1, iommu);
3008 * Disable queued invalidation if supported and already enabled
3009 * before OS handover.
3011 dmar_disable_qi(iommu);
3014 if (dmar_enable_qi(iommu)) {
3016 * Queued Invalidate not enabled, use Register Based Invalidate
3018 iommu->flush.flush_context = __iommu_flush_context;
3019 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3020 pr_info("%s: Using Register based invalidation\n",
3023 iommu->flush.flush_context = qi_flush_context;
3024 iommu->flush.flush_iotlb = qi_flush_iotlb;
3025 pr_info("%s: Using Queued invalidation\n", iommu->name);
3029 static int copy_context_table(struct intel_iommu *iommu,
3030 struct root_entry *old_re,
3031 struct context_entry **tbl,
3034 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3035 struct context_entry *new_ce = NULL, ce;
3036 struct context_entry *old_ce = NULL;
3037 struct root_entry re;
3038 phys_addr_t old_ce_phys;
3040 tbl_idx = ext ? bus * 2 : bus;
3041 memcpy(&re, old_re, sizeof(re));
3043 for (devfn = 0; devfn < 256; devfn++) {
3044 /* First calculate the correct index */
3045 idx = (ext ? devfn * 2 : devfn) % 256;
3048 /* First save what we may have and clean up */
3050 tbl[tbl_idx] = new_ce;
3051 __iommu_flush_cache(iommu, new_ce,
3061 old_ce_phys = root_entry_lctp(&re);
3063 old_ce_phys = root_entry_uctp(&re);
3066 if (ext && devfn == 0) {
3067 /* No LCTP, try UCTP */
3076 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3081 new_ce = alloc_pgtable_page(iommu->node);
3088 /* Now copy the context entry */
3089 memcpy(&ce, old_ce + idx, sizeof(ce));
3091 if (!__context_present(&ce))
3094 did = context_domain_id(&ce);
3095 if (did >= 0 && did < cap_ndoms(iommu->cap))
3096 set_bit(did, iommu->domain_ids);
3099 * We need a marker for copied context entries. This
3100 * marker needs to work for the old format as well as
3101 * for extended context entries.
3103 * Bit 67 of the context entry is used. In the old
3104 * format this bit is available to software, in the
3105 * extended format it is the PGE bit, but PGE is ignored
3106 * by HW if PASIDs are disabled (and thus still
3109 * So disable PASIDs first and then mark the entry
3110 * copied. This means that we don't copy PASID
3111 * translations from the old kernel, but this is fine as
3112 * faults there are not fatal.
3114 context_clear_pasid_enable(&ce);
3115 context_set_copied(&ce);
3120 tbl[tbl_idx + pos] = new_ce;
3122 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3131 static int copy_translation_tables(struct intel_iommu *iommu)
3133 struct context_entry **ctxt_tbls;
3134 struct root_entry *old_rt;
3135 phys_addr_t old_rt_phys;
3136 int ctxt_table_entries;
3137 unsigned long flags;
3142 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3143 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3144 new_ext = !!ecap_ecs(iommu->ecap);
3147 * The RTT bit can only be changed when translation is disabled,
3148 * but disabling translation means to open a window for data
3149 * corruption. So bail out and don't copy anything if we would
3150 * have to change the bit.
3155 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3159 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3163 /* This is too big for the stack - allocate it from slab */
3164 ctxt_table_entries = ext ? 512 : 256;
3166 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3170 for (bus = 0; bus < 256; bus++) {
3171 ret = copy_context_table(iommu, &old_rt[bus],
3172 ctxt_tbls, bus, ext);
3174 pr_err("%s: Failed to copy context table for bus %d\n",
3180 spin_lock_irqsave(&iommu->lock, flags);
3182 /* Context tables are copied, now write them to the root_entry table */
3183 for (bus = 0; bus < 256; bus++) {
3184 int idx = ext ? bus * 2 : bus;
3187 if (ctxt_tbls[idx]) {
3188 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3189 iommu->root_entry[bus].lo = val;
3192 if (!ext || !ctxt_tbls[idx + 1])
3195 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3196 iommu->root_entry[bus].hi = val;
3199 spin_unlock_irqrestore(&iommu->lock, flags);
3203 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
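/*
 * Worked example (illustration only): with extended root entries (ext), each
 * bus uses two copied context tables. For bus 5 the lower half (devfn 0-127)
 * lands in ctxt_tbls[10] and is written to root_entry[5].lo, while the upper
 * half (devfn 128-255) lands in ctxt_tbls[11] and root_entry[5].hi, matching
 * the "bus * 2" indexing above.
 */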
3213 static int __init init_dmars(void)
3215 struct dmar_drhd_unit *drhd;
3216 struct intel_iommu *iommu;
3222 * initialize and program root entry to not present
3225 for_each_drhd_unit(drhd) {
3227 * No lock needed as this is only incremented in the single-
3228 * threaded kernel __init code path; all other accesses are reads
3231 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3235 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3238 /* Preallocate enough resources for IOMMU hot-addition */
3239 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3240 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3242 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3245 pr_err("Allocating global iommu array failed\n");
3250 for_each_active_iommu(iommu, drhd) {
3252 * Find the max pasid size of all IOMMU's in the system.
3253 * We need to ensure the system pasid table is no bigger
3254 * than the smallest supported.
3256 if (pasid_supported(iommu)) {
3257 u32 temp = 2 << ecap_pss(iommu->ecap);
3259 intel_pasid_max_id = min_t(u32, temp,
3260 intel_pasid_max_id);
3263 g_iommus[iommu->seq_id] = iommu;
3265 intel_iommu_init_qi(iommu);
3267 ret = iommu_init_domains(iommu);
3271 init_translation_status(iommu);
3273 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3274 iommu_disable_translation(iommu);
3275 clear_translation_pre_enabled(iommu);
3276 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3282 * we could share the same root & context tables
3283 * among all IOMMUs. Need to split it later.
3285 ret = iommu_alloc_root_entry(iommu);
3289 if (translation_pre_enabled(iommu)) {
3290 pr_info("Translation already enabled - trying to copy translation structures\n");
3292 ret = copy_translation_tables(iommu);
3295 * We found the IOMMU with translation
3296 * enabled - but failed to copy over the
3297 * old root-entry table. Try to proceed
3298 * by disabling translation now and
3299 * allocating a clean root-entry table.
3300 * This might cause DMAR faults, but
3301 * probably the dump will still succeed.
3303 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3305 iommu_disable_translation(iommu);
3306 clear_translation_pre_enabled(iommu);
3308 pr_info("Copied translation tables from previous kernel for %s\n",
3313 if (!ecap_pass_through(iommu->ecap))
3314 hw_pass_through = 0;
3315 #ifdef CONFIG_INTEL_IOMMU_SVM
3316 if (pasid_supported(iommu))
3317 intel_svm_init(iommu);
3322 * Now that qi is enabled on all iommus, set the root entry and flush
3323 * caches. This is required on some Intel X58 chipsets, otherwise the
3324 * flush_context function will loop forever and the boot hangs.
3326 for_each_active_iommu(iommu, drhd) {
3327 iommu_flush_write_buffer(iommu);
3328 iommu_set_root_entry(iommu);
3329 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3330 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3333 if (iommu_pass_through)
3334 iommu_identity_mapping |= IDENTMAP_ALL;
3336 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3341 iommu_identity_mapping |= IDENTMAP_GFX;
3343 check_tylersburg_isoch();
3345 ret = si_domain_init(hw_pass_through);
3352 * global invalidate context cache
3353 * global invalidate iotlb
3354 * enable translation
3356 for_each_iommu(iommu, drhd) {
3357 if (drhd->ignored) {
3359 * we always have to disable PMRs or DMA may fail on
3363 iommu_disable_protect_mem_regions(iommu);
3367 iommu_flush_write_buffer(iommu);
3369 #ifdef CONFIG_INTEL_IOMMU_SVM
3370 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3372 * Calling dmar_alloc_hwirq() with dmar_global_lock held
3373 * could cause a lock race condition.
3375 up_write(&dmar_global_lock);
3376 ret = intel_svm_enable_prq(iommu);
3377 down_write(&dmar_global_lock);
3382 ret = dmar_set_interrupt(iommu);
3390 for_each_active_iommu(iommu, drhd) {
3391 disable_dmar_iommu(iommu);
3392 free_dmar_iommu(iommu);
3401 /* This takes a number of _MM_ pages, not VTD pages */
3402 static unsigned long intel_alloc_iova(struct device *dev,
3403 struct dmar_domain *domain,
3404 unsigned long nrpages, uint64_t dma_mask)
3406 unsigned long iova_pfn;
3408 /* Restrict dma_mask to the width that the iommu can handle */
3409 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3410 /* Ensure we reserve the whole size-aligned region */
3411 nrpages = __roundup_pow_of_two(nrpages);
3413 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3415 * First try to allocate an io virtual address in
3416 * DMA_BIT_MASK(32) and if that fails then try allocating
3419 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3420 IOVA_PFN(DMA_BIT_MASK(32)), false);
3424 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3425 IOVA_PFN(dma_mask), true);
3426 if (unlikely(!iova_pfn)) {
3427 dev_err(dev, "Allocating %ld-page iova failed\n", nrpages);
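/*
 * Illustrative sketch (not part of the driver): a typical caller converts a
 * VT-d page count to mm pages first, e.g. for a hypothetical 8KiB buffer:
 *
 *	unsigned long iova_pfn;
 *
 *	iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(2), dma_mask);
 *	if (!iova_pfn)
 *		goto error;
 *
 * The first attempt stays below 4GiB unless forcedac is set; only on failure
 * does the allocation retry against the full dma_mask.
 */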
3434 static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
3436 struct dmar_domain *domain, *tmp;
3437 struct dmar_rmrr_unit *rmrr;
3438 struct device *i_dev;
3441 /* Device shouldn't be attached by any domains. */
3442 domain = find_domain(dev);
3446 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3450 /* We have a new domain - setup possible RMRRs for the device */
3452 for_each_rmrr_units(rmrr) {
3453 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3458 ret = domain_prepare_identity_map(dev, domain,
3462 dev_err(dev, "Mapping reserved region failed\n");
3467 tmp = set_domain_for_dev(dev, domain);
3468 if (!tmp || domain != tmp) {
3469 domain_exit(domain);
3475 dev_err(dev, "Allocating domain failed\n");
3480 /* Check if the dev needs to go through the non-identity map and unmap process. */
3481 static bool iommu_need_mapping(struct device *dev)
3485 if (iommu_dummy(dev))
3488 ret = identity_mapping(dev);
3490 u64 dma_mask = *dev->dma_mask;
3492 if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
3493 dma_mask = dev->coherent_dma_mask;
3495 if (dma_mask >= dma_get_required_mask(dev))
3499 * The device is removed from si_domain and 32 bit DMA falls back to
3500 * non-identity mapping.
3502 dmar_remove_one_dev_info(dev);
3503 ret = iommu_request_dma_domain_for_dev(dev);
3505 struct iommu_domain *domain;
3506 struct dmar_domain *dmar_domain;
3508 domain = iommu_get_domain_for_dev(dev);
3510 dmar_domain = to_dmar_domain(domain);
3511 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
3513 get_private_domain_for_dev(dev);
3516 dev_info(dev, "32bit DMA uses non-identity mapping\n");
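/*
 * Worked example (illustration only): consider a device with a 32-bit DMA
 * mask sitting in the identity domain on a machine with more than 4GiB of
 * RAM. dma_get_required_mask() then exceeds DMA_BIT_MASK(32), so the code
 * above removes the device from si_domain and gives it a private DMA domain;
 * a device whose mask covers all of memory stays identity mapped.
 */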
3522 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3523 size_t size, int dir, u64 dma_mask)
3525 struct dmar_domain *domain;
3526 phys_addr_t start_paddr;
3527 unsigned long iova_pfn;
3530 struct intel_iommu *iommu;
3531 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3533 BUG_ON(dir == DMA_NONE);
3535 domain = find_domain(dev);
3537 return DMA_MAPPING_ERROR;
3539 iommu = domain_get_iommu(domain);
3540 size = aligned_nrpages(paddr, size);
3542 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3547 * Check if DMAR supports zero-length reads on write only
3550 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3551 !cap_zlr(iommu->cap))
3552 prot |= DMA_PTE_READ;
3553 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3554 prot |= DMA_PTE_WRITE;
3556 * paddr - (paddr + size) might span partial pages; we should map the whole
3557 * page. Note: if two parts of one page are separately mapped, we
3558 * might have two guest_addr mappings to the same host paddr, but this
3559 * is not a big problem
3561 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3562 mm_to_dma_pfn(paddr_pfn), size, prot);
3566 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3567 start_paddr += paddr & ~PAGE_MASK;
3572 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3573 dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
3574 size, (unsigned long long)paddr, dir);
3575 return DMA_MAPPING_ERROR;
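/*
 * Worked example (assuming 4KiB VT-d pages): an unaligned request with
 * paddr = 0x12340800 and size = 0x1000 straddles two pages, so
 * aligned_nrpages(paddr, size) evaluates to 2 and both pages are mapped;
 * the returned DMA address has the low offset 0x800 added back in.
 */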
3578 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3579 unsigned long offset, size_t size,
3580 enum dma_data_direction dir,
3581 unsigned long attrs)
3583 if (iommu_need_mapping(dev))
3584 return __intel_map_single(dev, page_to_phys(page) + offset,
3585 size, dir, *dev->dma_mask);
3586 return dma_direct_map_page(dev, page, offset, size, dir, attrs);
3589 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
3590 size_t size, enum dma_data_direction dir,
3591 unsigned long attrs)
3593 if (iommu_need_mapping(dev))
3594 return __intel_map_single(dev, phys_addr, size, dir,
3596 return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
3599 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3601 struct dmar_domain *domain;
3602 unsigned long start_pfn, last_pfn;
3603 unsigned long nrpages;
3604 unsigned long iova_pfn;
3605 struct intel_iommu *iommu;
3606 struct page *freelist;
3607 struct pci_dev *pdev = NULL;
3609 domain = find_domain(dev);
3612 iommu = domain_get_iommu(domain);
3614 iova_pfn = IOVA_PFN(dev_addr);
3616 nrpages = aligned_nrpages(dev_addr, size);
3617 start_pfn = mm_to_dma_pfn(iova_pfn);
3618 last_pfn = start_pfn + nrpages - 1;
3620 if (dev_is_pci(dev))
3621 pdev = to_pci_dev(dev);
3623 dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
3625 freelist = domain_unmap(domain, start_pfn, last_pfn);
3627 if (intel_iommu_strict || (pdev && pdev->untrusted)) {
3628 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3629 nrpages, !freelist, 0);
3631 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3632 dma_free_pagelist(freelist);
3634 queue_iova(&domain->iovad, iova_pfn, nrpages,
3635 (unsigned long)freelist);
3637 * queue up the release of the unmap to save roughly 1/6th of the
3638 * CPU time used up by the iotlb flush operation...
3643 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3644 size_t size, enum dma_data_direction dir,
3645 unsigned long attrs)
3647 if (iommu_need_mapping(dev))
3648 intel_unmap(dev, dev_addr, size);
3650 dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
3653 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
3654 size_t size, enum dma_data_direction dir, unsigned long attrs)
3656 if (iommu_need_mapping(dev))
3657 intel_unmap(dev, dev_addr, size);
3660 static void *intel_alloc_coherent(struct device *dev, size_t size,
3661 dma_addr_t *dma_handle, gfp_t flags,
3662 unsigned long attrs)
3664 struct page *page = NULL;
3667 if (!iommu_need_mapping(dev))
3668 return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
3670 size = PAGE_ALIGN(size);
3671 order = get_order(size);
3673 if (gfpflags_allow_blocking(flags)) {
3674 unsigned int count = size >> PAGE_SHIFT;
3676 page = dma_alloc_from_contiguous(dev, count, order,
3677 flags & __GFP_NOWARN);
3681 page = alloc_pages(flags, order);
3684 memset(page_address(page), 0, size);
3686 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3688 dev->coherent_dma_mask);
3689 if (*dma_handle != DMA_MAPPING_ERROR)
3690 return page_address(page);
3691 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3692 __free_pages(page, order);
3697 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3698 dma_addr_t dma_handle, unsigned long attrs)
3701 struct page *page = virt_to_page(vaddr);
3703 if (!iommu_need_mapping(dev))
3704 return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
3706 size = PAGE_ALIGN(size);
3707 order = get_order(size);
3709 intel_unmap(dev, dma_handle, size);
3710 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3711 __free_pages(page, order);
3714 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3715 int nelems, enum dma_data_direction dir,
3716 unsigned long attrs)
3718 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3719 unsigned long nrpages = 0;
3720 struct scatterlist *sg;
3723 if (!iommu_need_mapping(dev))
3724 return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
3726 for_each_sg(sglist, sg, nelems, i) {
3727 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3730 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3733 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3734 enum dma_data_direction dir, unsigned long attrs)
3737 struct dmar_domain *domain;
3740 unsigned long iova_pfn;
3742 struct scatterlist *sg;
3743 unsigned long start_vpfn;
3744 struct intel_iommu *iommu;
3746 BUG_ON(dir == DMA_NONE);
3747 if (!iommu_need_mapping(dev))
3748 return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
3750 domain = find_domain(dev);
3754 iommu = domain_get_iommu(domain);
3756 for_each_sg(sglist, sg, nelems, i)
3757 size += aligned_nrpages(sg->offset, sg->length);
3759 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3762 sglist->dma_length = 0;
3767 * Check if DMAR supports zero-length reads on write only
3770 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3771 !cap_zlr(iommu->cap))
3772 prot |= DMA_PTE_READ;
3773 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3774 prot |= DMA_PTE_WRITE;
3776 start_vpfn = mm_to_dma_pfn(iova_pfn);
3778 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3779 if (unlikely(ret)) {
3780 dma_pte_free_pagetable(domain, start_vpfn,
3781 start_vpfn + size - 1,
3782 agaw_to_level(domain->agaw) + 1);
3783 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3790 static const struct dma_map_ops intel_dma_ops = {
3791 .alloc = intel_alloc_coherent,
3792 .free = intel_free_coherent,
3793 .map_sg = intel_map_sg,
3794 .unmap_sg = intel_unmap_sg,
3795 .map_page = intel_map_page,
3796 .unmap_page = intel_unmap_page,
3797 .map_resource = intel_map_resource,
3798 .unmap_resource = intel_unmap_resource,
3799 .dma_supported = dma_direct_supported,
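/*
 * Illustrative note (assumption, not driver code): once intel_dma_ops is
 * installed as the global dma_ops, an ordinary driver call such as
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * is routed through intel_map_page() above, which either builds an IOMMU
 * mapping or falls back to dma-direct when the device is identity mapped.
 */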
3802 static inline int iommu_domain_cache_init(void)
3806 iommu_domain_cache = kmem_cache_create("iommu_domain",
3807 sizeof(struct dmar_domain),
3812 if (!iommu_domain_cache) {
3813 pr_err("Couldn't create iommu_domain cache\n");
3820 static inline int iommu_devinfo_cache_init(void)
3824 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3825 sizeof(struct device_domain_info),
3829 if (!iommu_devinfo_cache) {
3830 pr_err("Couldn't create devinfo cache\n");
3837 static int __init iommu_init_mempool(void)
3840 ret = iova_cache_get();
3844 ret = iommu_domain_cache_init();
3848 ret = iommu_devinfo_cache_init();
3852 kmem_cache_destroy(iommu_domain_cache);
3859 static void __init iommu_exit_mempool(void)
3861 kmem_cache_destroy(iommu_devinfo_cache);
3862 kmem_cache_destroy(iommu_domain_cache);
3866 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3868 struct dmar_drhd_unit *drhd;
3872 /* We know that this device on this chipset has its own IOMMU.
3873 * If we find it under a different IOMMU, then the BIOS is lying
3874 * to us. Hope that the IOMMU for this device is actually
3875 * disabled, and it needs no translation...
3877 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3879 /* "can't" happen */
3880 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3883 vtbar &= 0xffff0000;
3885 /* we know that this iommu should be at offset 0xa000 from vtbar */
3886 drhd = dmar_find_matched_drhd_unit(pdev);
3887 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3888 TAINT_FIRMWARE_WORKAROUND,
3889 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3890 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3892 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3894 static void __init init_no_remapping_devices(void)
3896 struct dmar_drhd_unit *drhd;
3900 for_each_drhd_unit(drhd) {
3901 if (!drhd->include_all) {
3902 for_each_active_dev_scope(drhd->devices,
3903 drhd->devices_cnt, i, dev)
3905 /* ignore DMAR unit if no devices exist */
3906 if (i == drhd->devices_cnt)
3911 for_each_active_drhd_unit(drhd) {
3912 if (drhd->include_all)
3915 for_each_active_dev_scope(drhd->devices,
3916 drhd->devices_cnt, i, dev)
3917 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3919 if (i < drhd->devices_cnt)
3922 /* This IOMMU has *only* gfx devices. Either bypass it or
3923 set the gfx_mapped flag, as appropriate */
3924 if (!dmar_map_gfx) {
3926 for_each_active_dev_scope(drhd->devices,
3927 drhd->devices_cnt, i, dev)
3928 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3933 #ifdef CONFIG_SUSPEND
3934 static int init_iommu_hw(void)
3936 struct dmar_drhd_unit *drhd;
3937 struct intel_iommu *iommu = NULL;
3939 for_each_active_iommu(iommu, drhd)
3941 dmar_reenable_qi(iommu);
3943 for_each_iommu(iommu, drhd) {
3944 if (drhd->ignored) {
3946 * we always have to disable PMRs or DMA may fail on
3950 iommu_disable_protect_mem_regions(iommu);
3954 iommu_flush_write_buffer(iommu);
3956 iommu_set_root_entry(iommu);
3958 iommu->flush.flush_context(iommu, 0, 0, 0,
3959 DMA_CCMD_GLOBAL_INVL);
3960 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3961 iommu_enable_translation(iommu);
3962 iommu_disable_protect_mem_regions(iommu);
3968 static void iommu_flush_all(void)
3970 struct dmar_drhd_unit *drhd;
3971 struct intel_iommu *iommu;
3973 for_each_active_iommu(iommu, drhd) {
3974 iommu->flush.flush_context(iommu, 0, 0, 0,
3975 DMA_CCMD_GLOBAL_INVL);
3976 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3977 DMA_TLB_GLOBAL_FLUSH);
3981 static int iommu_suspend(void)
3983 struct dmar_drhd_unit *drhd;
3984 struct intel_iommu *iommu = NULL;
3987 for_each_active_iommu(iommu, drhd) {
3988 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
3990 if (!iommu->iommu_state)
3996 for_each_active_iommu(iommu, drhd) {
3997 iommu_disable_translation(iommu);
3999 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4001 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4002 readl(iommu->reg + DMAR_FECTL_REG);
4003 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4004 readl(iommu->reg + DMAR_FEDATA_REG);
4005 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4006 readl(iommu->reg + DMAR_FEADDR_REG);
4007 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4008 readl(iommu->reg + DMAR_FEUADDR_REG);
4010 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4015 for_each_active_iommu(iommu, drhd)
4016 kfree(iommu->iommu_state);
4021 static void iommu_resume(void)
4023 struct dmar_drhd_unit *drhd;
4024 struct intel_iommu *iommu = NULL;
4027 if (init_iommu_hw()) {
4029 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4031 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4035 for_each_active_iommu(iommu, drhd) {
4037 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4039 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4040 iommu->reg + DMAR_FECTL_REG);
4041 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4042 iommu->reg + DMAR_FEDATA_REG);
4043 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4044 iommu->reg + DMAR_FEADDR_REG);
4045 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4046 iommu->reg + DMAR_FEUADDR_REG);
4048 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4051 for_each_active_iommu(iommu, drhd)
4052 kfree(iommu->iommu_state);
4055 static struct syscore_ops iommu_syscore_ops = {
4056 .resume = iommu_resume,
4057 .suspend = iommu_suspend,
4060 static void __init init_iommu_pm_ops(void)
4062 register_syscore_ops(&iommu_syscore_ops);
4066 static inline void init_iommu_pm_ops(void) {}
4067 #endif /* CONFIG_PM */
4070 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4072 struct acpi_dmar_reserved_memory *rmrr;
4073 struct dmar_rmrr_unit *rmrru;
4076 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4080 rmrru->hdr = header;
4081 rmrr = (struct acpi_dmar_reserved_memory *)header;
4082 rmrru->base_address = rmrr->base_address;
4083 rmrru->end_address = rmrr->end_address;
4085 length = rmrr->end_address - rmrr->base_address + 1;
4087 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4088 ((void *)rmrr) + rmrr->header.length,
4089 &rmrru->devices_cnt);
4090 if (rmrru->devices_cnt && rmrru->devices == NULL)
4093 list_add(&rmrru->list, &dmar_rmrr_units);
4102 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4104 struct dmar_atsr_unit *atsru;
4105 struct acpi_dmar_atsr *tmp;
4107 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4108 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4109 if (atsr->segment != tmp->segment)
4111 if (atsr->header.length != tmp->header.length)
4113 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4120 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4122 struct acpi_dmar_atsr *atsr;
4123 struct dmar_atsr_unit *atsru;
4125 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4128 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4129 atsru = dmar_find_atsr(atsr);
4133 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4138 * If memory is allocated from slab by ACPI _DSM method, we need to
4139 * copy the memory content because the memory buffer will be freed
4142 atsru->hdr = (void *)(atsru + 1);
4143 memcpy(atsru->hdr, hdr, hdr->length);
4144 atsru->include_all = atsr->flags & 0x1;
4145 if (!atsru->include_all) {
4146 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4147 (void *)atsr + atsr->header.length,
4148 &atsru->devices_cnt);
4149 if (atsru->devices_cnt && atsru->devices == NULL) {
4155 list_add_rcu(&atsru->list, &dmar_atsr_units);
4160 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4162 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4166 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4168 struct acpi_dmar_atsr *atsr;
4169 struct dmar_atsr_unit *atsru;
4171 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4172 atsru = dmar_find_atsr(atsr);
4174 list_del_rcu(&atsru->list);
4176 intel_iommu_free_atsr(atsru);
4182 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4186 struct acpi_dmar_atsr *atsr;
4187 struct dmar_atsr_unit *atsru;
4189 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4190 atsru = dmar_find_atsr(atsr);
4194 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4195 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4203 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4206 struct intel_iommu *iommu = dmaru->iommu;
4208 if (g_iommus[iommu->seq_id])
4211 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4212 pr_warn("%s: Doesn't support hardware pass through.\n",
4216 if (!ecap_sc_support(iommu->ecap) &&
4217 domain_update_iommu_snooping(iommu)) {
4218 pr_warn("%s: Doesn't support snooping.\n",
4222 sp = domain_update_iommu_superpage(iommu) - 1;
4223 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4224 pr_warn("%s: Doesn't support large page.\n",
4230 * Disable translation if already enabled prior to OS handover.
4232 if (iommu->gcmd & DMA_GCMD_TE)
4233 iommu_disable_translation(iommu);
4235 g_iommus[iommu->seq_id] = iommu;
4236 ret = iommu_init_domains(iommu);
4238 ret = iommu_alloc_root_entry(iommu);
4242 #ifdef CONFIG_INTEL_IOMMU_SVM
4243 if (pasid_supported(iommu))
4244 intel_svm_init(iommu);
4247 if (dmaru->ignored) {
4249 * we always have to disable PMRs or DMA may fail on this device
4252 iommu_disable_protect_mem_regions(iommu);
4256 intel_iommu_init_qi(iommu);
4257 iommu_flush_write_buffer(iommu);
4259 #ifdef CONFIG_INTEL_IOMMU_SVM
4260 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
4261 ret = intel_svm_enable_prq(iommu);
4266 ret = dmar_set_interrupt(iommu);
4270 iommu_set_root_entry(iommu);
4271 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4272 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4273 iommu_enable_translation(iommu);
4275 iommu_disable_protect_mem_regions(iommu);
4279 disable_dmar_iommu(iommu);
4281 free_dmar_iommu(iommu);
4285 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4288 struct intel_iommu *iommu = dmaru->iommu;
4290 if (!intel_iommu_enabled)
4296 ret = intel_iommu_add(dmaru);
4298 disable_dmar_iommu(iommu);
4299 free_dmar_iommu(iommu);
4305 static void intel_iommu_free_dmars(void)
4307 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4308 struct dmar_atsr_unit *atsru, *atsr_n;
4310 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4311 list_del(&rmrru->list);
4312 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4316 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4317 list_del(&atsru->list);
4318 intel_iommu_free_atsr(atsru);
4322 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4325 struct pci_bus *bus;
4326 struct pci_dev *bridge = NULL;
4328 struct acpi_dmar_atsr *atsr;
4329 struct dmar_atsr_unit *atsru;
4331 dev = pci_physfn(dev);
4332 for (bus = dev->bus; bus; bus = bus->parent) {
4334 /* If it's an integrated device, allow ATS */
4337 /* Connected via non-PCIe: no ATS */
4338 if (!pci_is_pcie(bridge) ||
4339 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4341 /* If we found the root port, look it up in the ATSR */
4342 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4347 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4348 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4349 if (atsr->segment != pci_domain_nr(dev->bus))
4352 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4353 if (tmp == &bridge->dev)
4356 if (atsru->include_all)
4366 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4369 struct dmar_rmrr_unit *rmrru;
4370 struct dmar_atsr_unit *atsru;
4371 struct acpi_dmar_atsr *atsr;
4372 struct acpi_dmar_reserved_memory *rmrr;
4374 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4377 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4378 rmrr = container_of(rmrru->hdr,
4379 struct acpi_dmar_reserved_memory, header);
4380 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4381 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4382 ((void *)rmrr) + rmrr->header.length,
4383 rmrr->segment, rmrru->devices,
4384 rmrru->devices_cnt);
4387 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4388 dmar_remove_dev_scope(info, rmrr->segment,
4389 rmrru->devices, rmrru->devices_cnt);
4393 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4394 if (atsru->include_all)
4397 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4398 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4399 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4400 (void *)atsr + atsr->header.length,
4401 atsr->segment, atsru->devices,
4402 atsru->devices_cnt);
4407 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4408 if (dmar_remove_dev_scope(info, atsr->segment,
4409 atsru->devices, atsru->devices_cnt))
4417 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4418 unsigned long val, void *v)
4420 struct memory_notify *mhp = v;
4421 unsigned long long start, end;
4422 unsigned long start_vpfn, last_vpfn;
4425 case MEM_GOING_ONLINE:
4426 start = mhp->start_pfn << PAGE_SHIFT;
4427 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4428 if (iommu_domain_identity_map(si_domain, start, end)) {
4429 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4436 case MEM_CANCEL_ONLINE:
4437 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4438 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4439 while (start_vpfn <= last_vpfn) {
4441 struct dmar_drhd_unit *drhd;
4442 struct intel_iommu *iommu;
4443 struct page *freelist;
4445 iova = find_iova(&si_domain->iovad, start_vpfn);
4447 pr_debug("Failed get IOVA for PFN %lx\n",
4452 iova = split_and_remove_iova(&si_domain->iovad, iova,
4453 start_vpfn, last_vpfn);
4455 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4456 start_vpfn, last_vpfn);
4460 freelist = domain_unmap(si_domain, iova->pfn_lo,
4464 for_each_active_iommu(iommu, drhd)
4465 iommu_flush_iotlb_psi(iommu, si_domain,
4466 iova->pfn_lo, iova_size(iova),
4469 dma_free_pagelist(freelist);
4471 start_vpfn = iova->pfn_hi + 1;
4472 free_iova_mem(iova);
4480 static struct notifier_block intel_iommu_memory_nb = {
4481 .notifier_call = intel_iommu_memory_notifier,
4485 static void free_all_cpu_cached_iovas(unsigned int cpu)
4489 for (i = 0; i < g_num_of_iommus; i++) {
4490 struct intel_iommu *iommu = g_iommus[i];
4491 struct dmar_domain *domain;
4497 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4498 domain = get_iommu_domain(iommu, (u16)did);
4502 free_cpu_cached_iovas(cpu, &domain->iovad);
4507 static int intel_iommu_cpu_dead(unsigned int cpu)
4509 free_all_cpu_cached_iovas(cpu);
4513 static void intel_disable_iommus(void)
4515 struct intel_iommu *iommu = NULL;
4516 struct dmar_drhd_unit *drhd;
4518 for_each_iommu(iommu, drhd)
4519 iommu_disable_translation(iommu);
4522 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4524 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4526 return container_of(iommu_dev, struct intel_iommu, iommu);
4529 static ssize_t intel_iommu_show_version(struct device *dev,
4530 struct device_attribute *attr,
4533 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4534 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4535 return sprintf(buf, "%d:%d\n",
4536 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4538 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4540 static ssize_t intel_iommu_show_address(struct device *dev,
4541 struct device_attribute *attr,
4544 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4545 return sprintf(buf, "%llx\n", iommu->reg_phys);
4547 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4549 static ssize_t intel_iommu_show_cap(struct device *dev,
4550 struct device_attribute *attr,
4553 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4554 return sprintf(buf, "%llx\n", iommu->cap);
4556 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4558 static ssize_t intel_iommu_show_ecap(struct device *dev,
4559 struct device_attribute *attr,
4562 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4563 return sprintf(buf, "%llx\n", iommu->ecap);
4565 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4567 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4568 struct device_attribute *attr,
4571 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4572 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4574 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4576 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4577 struct device_attribute *attr,
4580 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4581 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4582 cap_ndoms(iommu->cap)));
4584 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
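/*
 * Illustrative note (assumption): with the "intel-iommu" group below, these
 * attributes typically show up in sysfs as, e.g.,
 * /sys/class/iommu/dmar0/intel-iommu/version and
 * /sys/class/iommu/dmar0/intel-iommu/domains_used.
 */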
4586 static struct attribute *intel_iommu_attrs[] = {
4587 &dev_attr_version.attr,
4588 &dev_attr_address.attr,
4590 &dev_attr_ecap.attr,
4591 &dev_attr_domains_supported.attr,
4592 &dev_attr_domains_used.attr,
4596 static struct attribute_group intel_iommu_group = {
4597 .name = "intel-iommu",
4598 .attrs = intel_iommu_attrs,
4601 const struct attribute_group *intel_iommu_groups[] = {
4606 static int __init platform_optin_force_iommu(void)
4608 struct pci_dev *pdev = NULL;
4609 bool has_untrusted_dev = false;
4611 if (!dmar_platform_optin() || no_platform_optin)
4614 for_each_pci_dev(pdev) {
4615 if (pdev->untrusted) {
4616 has_untrusted_dev = true;
4621 if (!has_untrusted_dev)
4624 if (no_iommu || dmar_disabled)
4625 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4628 * If Intel-IOMMU is disabled by default, we will apply identity
4629 * map for all devices except those marked as being untrusted.
4632 iommu_identity_mapping |= IDENTMAP_ALL;
4635 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4643 static int __init probe_acpi_namespace_devices(void)
4645 struct dmar_drhd_unit *drhd;
4646 struct intel_iommu *iommu;
4650 for_each_active_iommu(iommu, drhd) {
4651 for_each_active_dev_scope(drhd->devices,
4652 drhd->devices_cnt, i, dev) {
4653 struct acpi_device_physical_node *pn;
4654 struct iommu_group *group;
4655 struct acpi_device *adev;
4657 if (dev->bus != &acpi_bus_type)
4660 adev = to_acpi_device(dev);
4661 mutex_lock(&adev->physical_node_lock);
4662 list_for_each_entry(pn,
4663 &adev->physical_node_list, node) {
4664 group = iommu_group_get(pn->dev);
4666 iommu_group_put(group);
4670 pn->dev->bus->iommu_ops = &intel_iommu_ops;
4671 ret = iommu_probe_device(pn->dev);
4675 mutex_unlock(&adev->physical_node_lock);
4685 int __init intel_iommu_init(void)
4688 struct dmar_drhd_unit *drhd;
4689 struct intel_iommu *iommu;
4692 * Intel IOMMU is required for a TXT/tboot launch or platform
4693 * opt in, so enforce that.
4695 force_on = tboot_force_iommu() || platform_optin_force_iommu();
4697 if (iommu_init_mempool()) {
4699 panic("tboot: Failed to initialize iommu memory\n");
4703 down_write(&dmar_global_lock);
4704 if (dmar_table_init()) {
4706 panic("tboot: Failed to initialize DMAR table\n");
4710 if (dmar_dev_scope_init() < 0) {
4712 panic("tboot: Failed to initialize DMAR device scope\n");
4716 up_write(&dmar_global_lock);
4719 * The bus notifier takes the dmar_global_lock, so lockdep will
4720 * complain later when we register it under the lock.
4722 dmar_register_bus_notifier();
4724 down_write(&dmar_global_lock);
4726 if (no_iommu || dmar_disabled) {
4728 * We exit the function here to ensure IOMMU's remapping and
4729 * mempool aren't set up, which means that the IOMMU's PMRs
4730 * won't be disabled via the call to init_dmars(). So disable
4731 * them explicitly here. The PMRs were set up by tboot prior to
4732 * calling SENTER, but the kernel is expected to reset/tear
4735 if (intel_iommu_tboot_noforce) {
4736 for_each_iommu(iommu, drhd)
4737 iommu_disable_protect_mem_regions(iommu);
4741 * Make sure the IOMMUs are switched off, even when we
4742 * boot into a kexec kernel and the previous kernel left
4745 intel_disable_iommus();
4749 if (list_empty(&dmar_rmrr_units))
4750 pr_info("No RMRR found\n");
4752 if (list_empty(&dmar_atsr_units))
4753 pr_info("No ATSR found\n");
4755 if (dmar_init_reserved_ranges()) {
4757 panic("tboot: Failed to reserve iommu ranges\n");
4758 goto out_free_reserved_range;
4762 intel_iommu_gfx_mapped = 1;
4764 init_no_remapping_devices();
4769 panic("tboot: Failed to initialize DMARs\n");
4770 pr_err("Initialization failed\n");
4771 goto out_free_reserved_range;
4773 up_write(&dmar_global_lock);
4775 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4778 dma_ops = &intel_dma_ops;
4780 init_iommu_pm_ops();
4782 for_each_active_iommu(iommu, drhd) {
4783 iommu_device_sysfs_add(&iommu->iommu, NULL,
4786 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4787 iommu_device_register(&iommu->iommu);
4790 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4791 if (si_domain && !hw_pass_through)
4792 register_memory_notifier(&intel_iommu_memory_nb);
4793 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4794 intel_iommu_cpu_dead);
4796 if (probe_acpi_namespace_devices())
4797 pr_warn("ACPI name space devices didn't probe correctly\n");
4799 /* Finally, we enable the DMA remapping hardware. */
4800 for_each_iommu(iommu, drhd) {
4801 if (!translation_pre_enabled(iommu))
4802 iommu_enable_translation(iommu);
4804 iommu_disable_protect_mem_regions(iommu);
4806 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4808 intel_iommu_enabled = 1;
4809 intel_iommu_debugfs_init();
4813 out_free_reserved_range:
4814 put_iova_domain(&reserved_iova_list);
4816 intel_iommu_free_dmars();
4817 up_write(&dmar_global_lock);
4818 iommu_exit_mempool();
4822 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4824 struct intel_iommu *iommu = opaque;
4826 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4831 * NB - intel-iommu lacks any sort of reference counting for the users of
4832 * dependent devices. If multiple endpoints have intersecting dependent
4833 * devices, unbinding the driver from any one of them will possibly leave
4834 * the others unable to operate.
4836 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4838 if (!iommu || !dev || !dev_is_pci(dev))
4841 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4844 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4846 struct dmar_domain *domain;
4847 struct intel_iommu *iommu;
4848 unsigned long flags;
4850 assert_spin_locked(&device_domain_lock);
4855 iommu = info->iommu;
4856 domain = info->domain;
4859 if (dev_is_pci(info->dev) && sm_supported(iommu))
4860 intel_pasid_tear_down_entry(iommu, info->dev,
4863 iommu_disable_dev_iotlb(info);
4864 domain_context_clear(iommu, info->dev);
4865 intel_pasid_free_table(info->dev);
4868 unlink_domain_info(info);
4870 spin_lock_irqsave(&iommu->lock, flags);
4871 domain_detach_iommu(domain, iommu);
4872 spin_unlock_irqrestore(&iommu->lock, flags);
4874 /* free the private domain */
4875 if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
4876 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
4877 domain_exit(info->domain);
4879 free_devinfo_mem(info);
4882 static void dmar_remove_one_dev_info(struct device *dev)
4884 struct device_domain_info *info;
4885 unsigned long flags;
4887 spin_lock_irqsave(&device_domain_lock, flags);
4888 info = dev->archdata.iommu;
4889 __dmar_remove_one_dev_info(info);
4890 spin_unlock_irqrestore(&device_domain_lock, flags);
4893 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4897 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
4898 domain_reserve_special_ranges(domain);
4900 /* calculate AGAW */
4901 domain->gaw = guest_width;
4902 adjust_width = guestwidth_to_adjustwidth(guest_width);
4903 domain->agaw = width_to_agaw(adjust_width);
4905 domain->iommu_coherency = 0;
4906 domain->iommu_snooping = 0;
4907 domain->iommu_superpage = 0;
4908 domain->max_addr = 0;
4910 /* always allocate the top pgd */
4911 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4914 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4918 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4920 struct dmar_domain *dmar_domain;
4921 struct iommu_domain *domain;
4924 case IOMMU_DOMAIN_DMA:
4926 case IOMMU_DOMAIN_UNMANAGED:
4927 dmar_domain = alloc_domain(0);
4929 pr_err("Can't allocate dmar_domain\n");
4932 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4933 pr_err("Domain initialization failed\n");
4934 domain_exit(dmar_domain);
4938 if (type == IOMMU_DOMAIN_DMA &&
4939 init_iova_flush_queue(&dmar_domain->iovad,
4940 iommu_flush_iova, iova_entry_free)) {
4941 pr_warn("iova flush queue initialization failed\n");
4942 intel_iommu_strict = 1;
4945 domain_update_iommu_cap(dmar_domain);
4947 domain = &dmar_domain->domain;
4948 domain->geometry.aperture_start = 0;
4949 domain->geometry.aperture_end =
4950 __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4951 domain->geometry.force_aperture = true;
4954 case IOMMU_DOMAIN_IDENTITY:
4955 return &si_domain->domain;
4963 static void intel_iommu_domain_free(struct iommu_domain *domain)
4965 if (domain != &si_domain->domain)
4966 domain_exit(to_dmar_domain(domain));
4970 * Check whether a @domain could be attached to the @dev through the
4971 * aux-domain attach/detach APIs.
4974 is_aux_domain(struct device *dev, struct iommu_domain *domain)
4976 struct device_domain_info *info = dev->archdata.iommu;
4978 return info && info->auxd_enabled &&
4979 domain->type == IOMMU_DOMAIN_UNMANAGED;
4982 static void auxiliary_link_device(struct dmar_domain *domain,
4985 struct device_domain_info *info = dev->archdata.iommu;
4987 assert_spin_locked(&device_domain_lock);
4991 domain->auxd_refcnt++;
4992 list_add(&domain->auxd, &info->auxiliary_domains);
4995 static void auxiliary_unlink_device(struct dmar_domain *domain,
4998 struct device_domain_info *info = dev->archdata.iommu;
5000 assert_spin_locked(&device_domain_lock);
5004 list_del(&domain->auxd);
5005 domain->auxd_refcnt--;
5007 if (!domain->auxd_refcnt && domain->default_pasid > 0)
5008 intel_pasid_free_id(domain->default_pasid);
5011 static int aux_domain_add_dev(struct dmar_domain *domain,
5016 unsigned long flags;
5017 struct intel_iommu *iommu;
5019 iommu = device_to_iommu(dev, &bus, &devfn);
5023 if (domain->default_pasid <= 0) {
5026 pasid = intel_pasid_alloc_id(domain, PASID_MIN,
5027 pci_max_pasids(to_pci_dev(dev)),
5030 pr_err("Can't allocate default pasid\n");
5033 domain->default_pasid = pasid;
5036 spin_lock_irqsave(&device_domain_lock, flags);
5038 * iommu->lock must be held to attach domain to iommu and setup the
5039 * pasid entry for second level translation.
5041 spin_lock(&iommu->lock);
5042 ret = domain_attach_iommu(domain, iommu);
5046 /* Setup the PASID entry for mediated devices: */
5047 ret = intel_pasid_setup_second_level(iommu, domain, dev,
5048 domain->default_pasid);
5051 spin_unlock(&iommu->lock);
5053 auxiliary_link_device(domain, dev);
5055 spin_unlock_irqrestore(&device_domain_lock, flags);
5060 domain_detach_iommu(domain, iommu);
5062 spin_unlock(&iommu->lock);
5063 spin_unlock_irqrestore(&device_domain_lock, flags);
5064 if (!domain->auxd_refcnt && domain->default_pasid > 0)
5065 intel_pasid_free_id(domain->default_pasid);
5070 static void aux_domain_remove_dev(struct dmar_domain *domain,
5073 struct device_domain_info *info;
5074 struct intel_iommu *iommu;
5075 unsigned long flags;
5077 if (!is_aux_domain(dev, &domain->domain))
5080 spin_lock_irqsave(&device_domain_lock, flags);
5081 info = dev->archdata.iommu;
5082 iommu = info->iommu;
5084 auxiliary_unlink_device(domain, dev);
5086 spin_lock(&iommu->lock);
5087 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
5088 domain_detach_iommu(domain, iommu);
5089 spin_unlock(&iommu->lock);
5091 spin_unlock_irqrestore(&device_domain_lock, flags);
5094 static int prepare_domain_attach_device(struct iommu_domain *domain,
5097 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5098 struct intel_iommu *iommu;
5102 iommu = device_to_iommu(dev, &bus, &devfn);
5106 /* check if this iommu agaw is sufficient for max mapped address */
5107 addr_width = agaw_to_width(iommu->agaw);
5108 if (addr_width > cap_mgaw(iommu->cap))
5109 addr_width = cap_mgaw(iommu->cap);
5111 if (dmar_domain->max_addr > (1LL << addr_width)) {
5112 dev_err(dev, "%s: iommu width (%d) is not "
5113 "sufficient for the mapped address (%llx)\n",
5114 __func__, addr_width, dmar_domain->max_addr);
5117 dmar_domain->gaw = addr_width;
5120 * Knock out extra levels of page tables if necessary
5122 while (iommu->agaw < dmar_domain->agaw) {
5123 struct dma_pte *pte;
5125 pte = dmar_domain->pgd;
5126 if (dma_pte_present(pte)) {
5127 dmar_domain->pgd = (struct dma_pte *)
5128 phys_to_virt(dma_pte_addr(pte));
5129 free_pgtable_page(pte);
5131 dmar_domain->agaw--;
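/*
 * For illustration: each pass through the loop above strips one level off
 * the domain's page table.  If, say, the IOMMU only supports a 3-level
 * table (39-bit AGAW) but the domain was built with 4 levels (48-bit),
 * the top-level pgd is replaced by the table its first entry points to,
 * the old top page is freed, and agaw is decremented until the widths
 * match.
 */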
5137 static int intel_iommu_attach_device(struct iommu_domain *domain,
5142 if (device_is_rmrr_locked(dev)) {
5143 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
5147 if (is_aux_domain(dev, domain))
5150 /* normally dev is not mapped */
5151 if (unlikely(domain_context_mapped(dev))) {
5152 struct dmar_domain *old_domain;
5154 old_domain = find_domain(dev);
5156 dmar_remove_one_dev_info(dev);
5159 ret = prepare_domain_attach_device(domain, dev);
5163 return domain_add_dev_info(to_dmar_domain(domain), dev);
5166 static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
5171 if (!is_aux_domain(dev, domain))
5174 ret = prepare_domain_attach_device(domain, dev);
5178 return aux_domain_add_dev(to_dmar_domain(domain), dev);
5181 static void intel_iommu_detach_device(struct iommu_domain *domain,
5184 dmar_remove_one_dev_info(dev);
5187 static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
5190 aux_domain_remove_dev(to_dmar_domain(domain), dev);
5193 static int intel_iommu_map(struct iommu_domain *domain,
5194 unsigned long iova, phys_addr_t hpa,
5195 size_t size, int iommu_prot)
5197 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5202 if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
5205 if (iommu_prot & IOMMU_READ)
5206 prot |= DMA_PTE_READ;
5207 if (iommu_prot & IOMMU_WRITE)
5208 prot |= DMA_PTE_WRITE;
5209 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5210 prot |= DMA_PTE_SNP;
5212 max_addr = iova + size;
5213 if (dmar_domain->max_addr < max_addr) {
5216 /* check if minimum agaw is sufficient for mapped address */
5217 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5218 if (end < max_addr) {
5219 pr_err("%s: iommu width (%d) is not "
5220 "sufficient for the mapped address (%llx)\n",
5221 __func__, dmar_domain->gaw, max_addr);
5224 dmar_domain->max_addr = max_addr;
5226 /* Round up size to next multiple of PAGE_SIZE, if it and
5227 the low bits of hpa would take us onto the next page */
5228 size = aligned_nrpages(hpa, size);
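/*
 * Worked example (assuming 4KiB VT-d pages): with hpa = 0x1ff0 and
 * size = 0x20 the range straddles a page boundary, so aligned_nrpages()
 * yields 2 and two page-table entries are written even though the
 * requested size is well under one page.
 */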
5229 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5230 hpa >> VTD_PAGE_SHIFT, size, prot);
5234 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5235 unsigned long iova, size_t size)
5237 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5238 struct page *freelist = NULL;
5239 unsigned long start_pfn, last_pfn;
5240 unsigned int npages;
5241 int iommu_id, level = 0;
5243 /* Cope with horrid API which requires us to unmap more than the
5244 size argument if it happens to be a large-page mapping. */
5245 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5246 if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
5249 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5250 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
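/*
 * Example: if the IOVA is covered by a level-2 superpage, the size is
 * rounded up to VTD_PAGE_SIZE << 9 = 2MiB; a level-3 superpage rounds up
 * to 1GiB.  The unmap and IOTLB flush below then cover the whole
 * superpage rather than just the requested range.
 */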
5252 start_pfn = iova >> VTD_PAGE_SHIFT;
5253 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5255 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5257 npages = last_pfn - start_pfn + 1;
5259 for_each_domain_iommu(iommu_id, dmar_domain)
5260 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5261 start_pfn, npages, !freelist, 0);
5263 dma_free_pagelist(freelist);
5265 if (dmar_domain->max_addr == iova + size)
5266 dmar_domain->max_addr = iova;
5271 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5274 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5275 struct dma_pte *pte;
5279 if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
5282 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5284 phys = dma_pte_addr(pte);
5289 static inline bool scalable_mode_support(void)
5291 struct dmar_drhd_unit *drhd;
5292 struct intel_iommu *iommu;
5296 for_each_active_iommu(iommu, drhd) {
5297 if (!sm_supported(iommu)) {
5307 static inline bool iommu_pasid_support(void)
5309 struct dmar_drhd_unit *drhd;
5310 struct intel_iommu *iommu;
5314 for_each_active_iommu(iommu, drhd) {
5315 if (!pasid_supported(iommu)) {
5325 static bool intel_iommu_capable(enum iommu_cap cap)
5327 if (cap == IOMMU_CAP_CACHE_COHERENCY)
5328 return domain_update_iommu_snooping(NULL) == 1;
5329 if (cap == IOMMU_CAP_INTR_REMAP)
5330 return irq_remapping_enabled == 1;
5335 static int intel_iommu_add_device(struct device *dev)
5337 struct dmar_domain *dmar_domain;
5338 struct iommu_domain *domain;
5339 struct intel_iommu *iommu;
5340 struct iommu_group *group;
5344 iommu = device_to_iommu(dev, &bus, &devfn);
5348 iommu_device_link(&iommu->iommu, dev);
5350 if (translation_pre_enabled(iommu))
5351 dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
5353 group = iommu_group_get_for_dev(dev);
5356 return PTR_ERR(group);
5358 iommu_group_put(group);
5360 domain = iommu_get_domain_for_dev(dev);
5361 dmar_domain = to_dmar_domain(domain);
5362 if (domain->type == IOMMU_DOMAIN_DMA) {
5363 if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
5364 ret = iommu_request_dm_for_dev(dev);
5366 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
5367 domain_add_dev_info(si_domain, dev);
5369 "Device uses a private identity domain.\n");
5376 if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
5377 ret = iommu_request_dma_domain_for_dev(dev);
5379 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
5380 if (!get_private_domain_for_dev(dev)) {
5382 "Failed to get a private domain.\n");
5387 "Device uses a private dma domain.\n");
5398 static void intel_iommu_remove_device(struct device *dev)
5400 struct intel_iommu *iommu;
5403 iommu = device_to_iommu(dev, &bus, &devfn);
5407 iommu_group_remove_device(dev);
5409 iommu_device_unlink(&iommu->iommu, dev);
5412 static void intel_iommu_get_resv_regions(struct device *device,
5413 struct list_head *head)
5415 int prot = DMA_PTE_READ | DMA_PTE_WRITE;
5416 struct iommu_resv_region *reg;
5417 struct dmar_rmrr_unit *rmrr;
5418 struct device *i_dev;
5421 down_read(&dmar_global_lock);
5422 for_each_rmrr_units(rmrr) {
5423 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5425 struct iommu_resv_region *resv;
5428 if (i_dev != device)
5431 length = rmrr->end_address - rmrr->base_address + 1;
5432 resv = iommu_alloc_resv_region(rmrr->base_address,
5438 list_add_tail(&resv->list, head);
5441 up_read(&dmar_global_lock);
5443 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
5444 if (dev_is_pci(device)) {
5445 struct pci_dev *pdev = to_pci_dev(device);
5447 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
5448 reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
5451 list_add_tail(&reg->list, head);
5454 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
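/*
 * The 1UL << 24 above is the first 16MiB of IOVA space.  Legacy floppy
 * DMA behind an ISA bridge is limited to 24-bit addresses, so (under
 * CONFIG_INTEL_IOMMU_FLOPPY_WA) that window is reported as a reserved
 * region and will not be handed out for ordinary DMA mappings.
 */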
5456 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5457 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5461 list_add_tail(&reg->list, head);
5464 static void intel_iommu_put_resv_regions(struct device *dev,
5465 struct list_head *head)
5467 struct iommu_resv_region *entry, *next;
5469 list_for_each_entry_safe(entry, next, head, list)
5473 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
5475 struct device_domain_info *info;
5476 struct context_entry *context;
5477 struct dmar_domain *domain;
5478 unsigned long flags;
5482 domain = find_domain(dev);
5486 spin_lock_irqsave(&device_domain_lock, flags);
5487 spin_lock(&iommu->lock);
5490 info = dev->archdata.iommu;
5491 if (!info || !info->pasid_supported)
5494 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5495 if (WARN_ON(!context))
5498 ctx_lo = context[0].lo;
5500 if (!(ctx_lo & CONTEXT_PASIDE)) {
5501 ctx_lo |= CONTEXT_PASIDE;
5502 context[0].lo = ctx_lo;
5504 iommu->flush.flush_context(iommu,
5505 domain->iommu_did[iommu->seq_id],
5506 PCI_DEVID(info->bus, info->devfn),
5507 DMA_CCMD_MASK_NOBIT,
5508 DMA_CCMD_DEVICE_INVL);
5511 /* Enable PASID support in the device, if it wasn't already */
5512 if (!info->pasid_enabled)
5513 iommu_enable_dev_iotlb(info);
5518 spin_unlock(&iommu->lock);
5519 spin_unlock_irqrestore(&device_domain_lock, flags);
5524 static void intel_iommu_apply_resv_region(struct device *dev,
5525 struct iommu_domain *domain,
5526 struct iommu_resv_region *region)
5528 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5529 unsigned long start, end;
5531 start = IOVA_PFN(region->start);
5532 end = IOVA_PFN(region->start + region->length - 1);
5534 WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
5537 #ifdef CONFIG_INTEL_IOMMU_SVM
5538 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5540 struct intel_iommu *iommu;
5543 if (iommu_dummy(dev)) {
5545 "No IOMMU translation for device; cannot enable SVM\n");
5549 iommu = device_to_iommu(dev, &bus, &devfn);
5551 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
5557 #endif /* CONFIG_INTEL_IOMMU_SVM */
5559 static int intel_iommu_enable_auxd(struct device *dev)
5561 struct device_domain_info *info;
5562 struct intel_iommu *iommu;
5563 unsigned long flags;
5567 iommu = device_to_iommu(dev, &bus, &devfn);
5568 if (!iommu || dmar_disabled)
5571 if (!sm_supported(iommu) || !pasid_supported(iommu))
5574 ret = intel_iommu_enable_pasid(iommu, dev);
5578 spin_lock_irqsave(&device_domain_lock, flags);
5579 info = dev->archdata.iommu;
5580 info->auxd_enabled = 1;
5581 spin_unlock_irqrestore(&device_domain_lock, flags);
5586 static int intel_iommu_disable_auxd(struct device *dev)
5588 struct device_domain_info *info;
5589 unsigned long flags;
5591 spin_lock_irqsave(&device_domain_lock, flags);
5592 info = dev->archdata.iommu;
5593 if (!WARN_ON(!info))
5594 info->auxd_enabled = 0;
5595 spin_unlock_irqrestore(&device_domain_lock, flags);
5601 * A PCI Express Designated Vendor-Specific Extended Capability (DVSEC) is
5602 * defined in section 3.7 of the Intel Scalable I/O Virtualization technical
5603 * spec so that system software and tools can detect endpoint devices that
5604 * support Intel Scalable I/O Virtualization without a host driver dependency.
5606 * Returns the address of the matching extended capability structure within
5607 * the device's PCI configuration space, or 0 if the device does not
5608 * support it.
5609 */
5610 static int siov_find_pci_dvsec(struct pci_dev *pdev)
5615 pos = pci_find_next_ext_capability(pdev, 0, 0x23);
5617 pci_read_config_word(pdev, pos + 4, &vendor);
5618 pci_read_config_word(pdev, pos + 8, &id);
5619 if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
5622 pos = pci_find_next_ext_capability(pdev, pos, 0x23);
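/*
 * The loop above walks extended capabilities with ID 0x23 (DVSEC), reads
 * the DVSEC vendor ID at offset +4 and the DVSEC ID at offset +8, and
 * accepts the Intel (0x8086) DVSEC with ID 5, which advertises Scalable
 * IOV support.  A hypothetical caller might use it like:
 *
 *	if (dev_is_pci(dev) && siov_find_pci_dvsec(to_pci_dev(dev)))
 *		; /\* device advertises Scalable IOV; AUX domains may work *\/
 */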
5629 intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
5631 if (feat == IOMMU_DEV_FEAT_AUX) {
5634 if (!dev_is_pci(dev) || dmar_disabled ||
5635 !scalable_mode_support() || !iommu_pasid_support())
5638 ret = pci_pasid_features(to_pci_dev(dev));
5642 return !!siov_find_pci_dvsec(to_pci_dev(dev));
5649 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
5651 if (feat == IOMMU_DEV_FEAT_AUX)
5652 return intel_iommu_enable_auxd(dev);
5658 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
5660 if (feat == IOMMU_DEV_FEAT_AUX)
5661 return intel_iommu_disable_auxd(dev);
5667 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
5669 struct device_domain_info *info = dev->archdata.iommu;
5671 if (feat == IOMMU_DEV_FEAT_AUX)
5672 return scalable_mode_support() && info && info->auxd_enabled;
5678 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
5680 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5682 return dmar_domain->default_pasid > 0 ?
5683 dmar_domain->default_pasid : -EINVAL;
5686 static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
5689 return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
5692 const struct iommu_ops intel_iommu_ops = {
5693 .capable = intel_iommu_capable,
5694 .domain_alloc = intel_iommu_domain_alloc,
5695 .domain_free = intel_iommu_domain_free,
5696 .attach_dev = intel_iommu_attach_device,
5697 .detach_dev = intel_iommu_detach_device,
5698 .aux_attach_dev = intel_iommu_aux_attach_device,
5699 .aux_detach_dev = intel_iommu_aux_detach_device,
5700 .aux_get_pasid = intel_iommu_aux_get_pasid,
5701 .map = intel_iommu_map,
5702 .unmap = intel_iommu_unmap,
5703 .iova_to_phys = intel_iommu_iova_to_phys,
5704 .add_device = intel_iommu_add_device,
5705 .remove_device = intel_iommu_remove_device,
5706 .get_resv_regions = intel_iommu_get_resv_regions,
5707 .put_resv_regions = intel_iommu_put_resv_regions,
5708 .apply_resv_region = intel_iommu_apply_resv_region,
5709 .device_group = pci_device_group,
5710 .dev_has_feat = intel_iommu_dev_has_feat,
5711 .dev_feat_enabled = intel_iommu_dev_feat_enabled,
5712 .dev_enable_feat = intel_iommu_dev_enable_feat,
5713 .dev_disable_feat = intel_iommu_dev_disable_feat,
5714 .is_attach_deferred = intel_iommu_is_attach_deferred,
5715 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
5718 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5720 /* G4x/GM45 integrated gfx dmar support is totally busted. */
5721 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
5725 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5726 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5727 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5728 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5729 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5730 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5731 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5733 static void quirk_iommu_rwbf(struct pci_dev *dev)
5736 * Mobile 4 Series Chipset neglects to set RWBF capability,
5737 * but needs it. Same seems to hold for the desktop versions.
5739 pci_info(dev, "Forcing write-buffer flush capability\n");
5743 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5744 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5745 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5746 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5747 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5748 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5749 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5752 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
5753 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5754 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
5755 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
5756 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5757 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5758 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5759 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
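/*
 * As the masks above suggest, bits 11:8 of the GGC register encode how much
 * stolen memory the BIOS set aside for the GTT and whether a VT-d-enabled
 * (shadow GTT) size was chosen; the quirk below only keeps graphics behind
 * the IOMMU when GGC_MEMORY_VT_ENABLED is set.
 */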
5761 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5765 if (pci_read_config_word(dev, GGC, &ggc))
5768 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5769 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5771 } else if (dmar_map_gfx) {
5772 /* we have to ensure the gfx device is idle before we flush */
5773 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
5774 intel_iommu_strict = 1;
5777 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5778 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5779 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5780 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5782 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5783 ISOCH DMAR unit for the Azalia sound device, but not give it any
5784 TLB entries, which causes it to deadlock. Check for that. We do
5785 this in a function called from init_dmars(), instead of in a PCI
5786 quirk, because we don't want to print the obnoxious "BIOS broken"
5787 message if VT-d is actually disabled.
5789 static void __init check_tylersburg_isoch(void)
5791 struct pci_dev *pdev;
5792 uint32_t vtisochctrl;
5794 /* If there's no Azalia in the system anyway, forget it. */
5795 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5800 /* System Management Registers. Might be hidden, in which case
5801 we can't do the sanity check. But that's OK, because the
5802 known-broken BIOSes _don't_ actually hide it, so far. */
5803 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5807 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5814 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5815 if (vtisochctrl & 1)
5818 /* Drop all bits other than the number of TLB entries */
5819 vtisochctrl &= 0x1c;
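/*
 * After masking with 0x1c only bits 4:2 remain; the resulting value is
 * treated as the number of isoch TLB entries, so 0x10 (16) below is the
 * recommended allocation and 0 means the BIOS routed Azalia to the isoch
 * DMAR unit but gave it no TLB space at all.
 */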
5821 /* If we have the recommended number of TLB entries (16), fine. */
5822 if (vtisochctrl == 0x10)
5825 /* Zero TLB entries? You get to ride the short bus to school. */
5827 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5828 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5829 dmi_get_system_info(DMI_BIOS_VENDOR),
5830 dmi_get_system_info(DMI_BIOS_VERSION),
5831 dmi_get_system_info(DMI_PRODUCT_VERSION));
5832 iommu_identity_mapping |= IDENTMAP_AZALIA;
5836 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",