2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
18 * Joerg Roedel <jroedel@suse.de>
21 #define pr_fmt(fmt) "DMAR: " fmt
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/cpu.h>
37 #include <linux/timer.h>
39 #include <linux/iova.h>
40 #include <linux/iommu.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/tboot.h>
44 #include <linux/dmi.h>
45 #include <linux/pci-ats.h>
46 #include <linux/memblock.h>
47 #include <linux/dma-contiguous.h>
48 #include <linux/crash_dump.h>
49 #include <asm/irq_remapping.h>
50 #include <asm/cacheflush.h>
51 #include <asm/iommu.h>
53 #include "irq_remapping.h"
55 #define ROOT_SIZE VTD_PAGE_SIZE
56 #define CONTEXT_SIZE VTD_PAGE_SIZE
58 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
59 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
60 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
61 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
63 #define IOAPIC_RANGE_START (0xfee00000)
64 #define IOAPIC_RANGE_END (0xfeefffff)
65 #define IOVA_START_ADDR (0x1000)
67 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
69 #define MAX_AGAW_WIDTH 64
70 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
72 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
73 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
75 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
76 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
77 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
78 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
79 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
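/*
 * Worked example (illustrative): with the default 48-bit guest address width,
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1 == 0xfffffffff, and
 * DOMAIN_MAX_ADDR(48) == 2^48 - 4KiB, the base address of that last page.
 */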
81 /* IO virtual address start page frame number */
82 #define IOVA_START_PFN (1)
84 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
85 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
86 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
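/*
 * With 4KiB pages, DMA_32BIT_PFN == IOVA_PFN(0xffffffff) == 0xfffff, the
 * last page frame reachable by a device restricted to a 32-bit DMA mask.
 */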
88 /* page table handling */
89 #define LEVEL_STRIDE (9)
90 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
93 * This bitmap is used to advertise the page sizes our hardware supports
94 * to the IOMMU core, which will then use this information to split
95 * physically contiguous memory regions it is mapping into page sizes that we support.
98 * Traditionally the IOMMU core just handed us the mappings directly,
99 * after making sure the size is a power-of-two multiple of the 4KiB page
100 * size and that the mapping has natural alignment.
102 * To retain this behavior, we currently advertise that we support
103 * all page sizes that are a power-of-two multiple of 4KiB.
105 * If at some point we'd like to utilize the IOMMU core's new behavior,
106 * we could change this to advertise the real page sizes we support.
108 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
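/*
 * ~0xFFFUL clears bits 0-11 and sets every higher bit, i.e. it advertises
 * every power-of-two size from 4KiB upwards rather than only the sizes the
 * hardware can actually use as superpages.
 */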
110 static inline int agaw_to_level(int agaw)
115 static inline int agaw_to_width(int agaw)
117 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
120 static inline int width_to_agaw(int width)
122 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
125 static inline unsigned int level_to_offset_bits(int level)
127 return (level - 1) * LEVEL_STRIDE;
130 static inline int pfn_level_offset(unsigned long pfn, int level)
132 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
135 static inline unsigned long level_mask(int level)
137 return -1UL << level_to_offset_bits(level);
140 static inline unsigned long level_size(int level)
142 return 1UL << level_to_offset_bits(level);
145 static inline unsigned long align_to_level(unsigned long pfn, int level)
147 return (pfn + level_size(level) - 1) & level_mask(level);
150 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
152 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
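/*
 * Quick sanity check on the level arithmetic above: with LEVEL_STRIDE == 9,
 * width_to_agaw(48) == 2 and agaw_to_width(2) == 48; a level-1 PTE maps one
 * 4KiB page, a level-2 PTE maps 512 pages (2MiB) and a level-3 PTE maps
 * 512 * 512 pages (1GiB).
 */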
155 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
156 are never going to work. */
157 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
159 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
162 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
164 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
166 static inline unsigned long page_to_dma_pfn(struct page *pg)
168 return mm_to_dma_pfn(page_to_pfn(pg));
170 static inline unsigned long virt_to_dma_pfn(void *p)
172 return page_to_dma_pfn(virt_to_page(p));
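/*
 * On x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the mm/dma pfn conversions
 * above are identity operations; the shift only matters when MM pages are
 * larger than 4KiB, in which case one mm pfn spans several VT-d pfns.
 */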
175 /* global iommu list, set NULL for ignored DMAR units */
176 static struct intel_iommu **g_iommus;
178 static void __init check_tylersburg_isoch(void);
179 static int rwbf_quirk;
182 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
183 * (used when the kernel is launched w/ TXT)
185 static int force_on = 0;
186 int intel_iommu_tboot_noforce;
191 * 12-63: Context Ptr (12 - (haw-1))
198 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
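/*
 * A root entry is two u64s (16 bytes), so ROOT_ENTRY_NR works out to
 * 4096 / 16 == 256: one entry per possible PCI bus number in the segment.
 */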
201 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
204 static phys_addr_t root_entry_lctp(struct root_entry *re)
209 return re->lo & VTD_PAGE_MASK;
213 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
216 static phys_addr_t root_entry_uctp(struct root_entry *re)
221 return re->hi & VTD_PAGE_MASK;
226 * 1: fault processing disable
227 * 2-3: translation type
228 * 12-63: address space root
234 struct context_entry {
239 static inline void context_clear_pasid_enable(struct context_entry *context)
241 context->lo &= ~(1ULL << 11);
244 static inline bool context_pasid_enabled(struct context_entry *context)
246 return !!(context->lo & (1ULL << 11));
249 static inline void context_set_copied(struct context_entry *context)
251 context->hi |= (1ull << 3);
254 static inline bool context_copied(struct context_entry *context)
256 return !!(context->hi & (1ULL << 3));
259 static inline bool __context_present(struct context_entry *context)
261 return (context->lo & 1);
264 static inline bool context_present(struct context_entry *context)
266 return context_pasid_enabled(context) ?
267 __context_present(context) :
268 __context_present(context) && !context_copied(context);
271 static inline void context_set_present(struct context_entry *context)
276 static inline void context_set_fault_enable(struct context_entry *context)
278 context->lo &= (((u64)-1) << 2) | 1;
281 static inline void context_set_translation_type(struct context_entry *context,
284 context->lo &= (((u64)-1) << 4) | 3;
285 context->lo |= (value & 3) << 2;
288 static inline void context_set_address_root(struct context_entry *context,
291 context->lo &= ~VTD_PAGE_MASK;
292 context->lo |= value & VTD_PAGE_MASK;
295 static inline void context_set_address_width(struct context_entry *context,
298 context->hi |= value & 7;
301 static inline void context_set_domain_id(struct context_entry *context,
304 context->hi |= (value & ((1 << 16) - 1)) << 8;
307 static inline int context_domain_id(struct context_entry *c)
309 return((c->hi >> 8) & 0xffff);
312 static inline void context_clear_entry(struct context_entry *context)
325 * 12-63: Host physical address
331 static inline void dma_clear_pte(struct dma_pte *pte)
336 static inline u64 dma_pte_addr(struct dma_pte *pte)
339 return pte->val & VTD_PAGE_MASK;
341 /* Must have a full atomic 64-bit read */
342 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
346 static inline bool dma_pte_present(struct dma_pte *pte)
348 return (pte->val & 3) != 0;
351 static inline bool dma_pte_superpage(struct dma_pte *pte)
353 return (pte->val & DMA_PTE_LARGE_PAGE);
356 static inline int first_pte_in_page(struct dma_pte *pte)
358 return !((unsigned long)pte & ~VTD_PAGE_MASK);
362 * This domain is a static identity mapping domain.
363 * 1. This domain creates a static 1:1 mapping to all usable memory.
364 * 2. It maps to each iommu if successful.
365 * 3. Each iommu maps to this domain if successful.
367 static struct dmar_domain *si_domain;
368 static int hw_pass_through = 1;
371 * Domain represents a virtual machine; more than one device
372 * across iommus may be owned by one domain, e.g. a kvm guest.
374 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
376 /* si_domain contains multiple devices */
377 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
379 #define for_each_domain_iommu(idx, domain) \
380 for (idx = 0; idx < g_num_of_iommus; idx++) \
381 if (domain->iommu_refcnt[idx])
384 int nid; /* node id */
386 unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
387 /* Refcount of devices per iommu */
390 u16 iommu_did[DMAR_UNITS_SUPPORTED];
391 /* Domain ids per IOMMU. Use u16 since
392 * domain ids are 16 bits wide according
393 * to the VT-d spec, section 9.3 */
395 bool has_iotlb_device;
396 struct list_head devices; /* all devices' list */
397 struct iova_domain iovad; /* iova's that belong to this domain */
399 struct dma_pte *pgd; /* virtual address */
400 int gaw; /* max guest address width */
402 /* adjusted guest address width, 0 is level 2 30-bit */
405 int flags; /* flags to find out type of domain */
407 int iommu_coherency;/* indicate coherency of iommu access */
408 int iommu_snooping; /* indicate snooping control feature*/
409 int iommu_count; /* reference count of iommu */
410 int iommu_superpage;/* Level of superpages supported:
411 0 == 4KiB (no superpages), 1 == 2MiB,
412 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
413 u64 max_addr; /* maximum mapped address */
415 struct iommu_domain domain; /* generic domain data structure for
419 /* PCI domain-device relationship */
420 struct device_domain_info {
421 struct list_head link; /* link to domain siblings */
422 struct list_head global; /* link to global list */
423 u8 bus; /* PCI bus number */
424 u8 devfn; /* PCI devfn number */
425 u8 pasid_supported:3;
432 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
433 struct intel_iommu *iommu; /* IOMMU used by this device */
434 struct dmar_domain *domain; /* pointer to domain */
437 struct dmar_rmrr_unit {
438 struct list_head list; /* list of rmrr units */
439 struct acpi_dmar_header *hdr; /* ACPI header */
440 u64 base_address; /* reserved base address*/
441 u64 end_address; /* reserved end address */
442 struct dmar_dev_scope *devices; /* target devices */
443 int devices_cnt; /* target device count */
444 struct iommu_resv_region *resv; /* reserved region handle */
447 struct dmar_atsr_unit {
448 struct list_head list; /* list of ATSR units */
449 struct acpi_dmar_header *hdr; /* ACPI header */
450 struct dmar_dev_scope *devices; /* target devices */
451 int devices_cnt; /* target device count */
452 u8 include_all:1; /* include all ports */
455 static LIST_HEAD(dmar_atsr_units);
456 static LIST_HEAD(dmar_rmrr_units);
458 #define for_each_rmrr_units(rmrr) \
459 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
461 static void flush_unmaps_timeout(unsigned long data);
463 struct deferred_flush_entry {
464 unsigned long iova_pfn;
465 unsigned long nrpages;
466 struct dmar_domain *domain;
467 struct page *freelist;
470 #define HIGH_WATER_MARK 250
471 struct deferred_flush_table {
473 struct deferred_flush_entry entries[HIGH_WATER_MARK];
476 struct deferred_flush_data {
479 struct timer_list timer;
481 struct deferred_flush_table *tables;
484 static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
486 /* number of intel_iommus; used to size g_iommus and per-IOMMU arrays */
487 static int g_num_of_iommus;
489 static void domain_exit(struct dmar_domain *domain);
490 static void domain_remove_dev_info(struct dmar_domain *domain);
491 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
493 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
494 static void domain_context_clear(struct intel_iommu *iommu,
496 static int domain_detach_iommu(struct dmar_domain *domain,
497 struct intel_iommu *iommu);
499 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
500 int dmar_disabled = 0;
502 int dmar_disabled = 1;
503 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
505 int intel_iommu_enabled = 0;
506 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
508 static int dmar_map_gfx = 1;
509 static int dmar_forcedac;
510 static int intel_iommu_strict;
511 static int intel_iommu_superpage = 1;
512 static int intel_iommu_ecs = 1;
513 static int intel_iommu_pasid28;
514 static int iommu_identity_mapping;
516 #define IDENTMAP_ALL 1
517 #define IDENTMAP_GFX 2
518 #define IDENTMAP_AZALIA 4
520 /* Broadwell and Skylake have broken ECS support — normal so-called "second
521 * level" translation of DMA requests-without-PASID doesn't actually happen
522 * unless you also set the NESTE bit in an extended context-entry. Which of
523 * course means that SVM doesn't work because it's trying to do nested
524 * translation of the physical addresses it finds in the process page tables,
525 * through the IOVA->phys mapping found in the "second level" page tables.
527 * The VT-d specification was retroactively changed to change the definition
528 * of the capability bits and pretend that Broadwell/Skylake never happened...
529 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
530 * for some reason it was the PASID capability bit which was redefined (from
531 * bit 28 on BDW/SKL to bit 40 in future).
533 * So our test for ECS needs to eschew those implementations which set the old
534 * PASID capability bit 28, since those are the ones on which ECS is broken.
535 * Unless we are working around the 'pasid28' limitations, that is, by putting
536 * the device into passthrough mode for normal DMA and thus masking the bug.
538 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
539 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
540 /* PASID support is thus enabled if ECS is enabled and *either* of the old
541 * or new capability bits is set. */
542 #define pasid_enabled(iommu) (ecs_enabled(iommu) && \
543 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
545 int intel_iommu_gfx_mapped;
546 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
548 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
549 static DEFINE_SPINLOCK(device_domain_lock);
550 static LIST_HEAD(device_domain_list);
552 const struct iommu_ops intel_iommu_ops;
554 static bool translation_pre_enabled(struct intel_iommu *iommu)
556 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
559 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
561 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
564 static void init_translation_status(struct intel_iommu *iommu)
568 gsts = readl(iommu->reg + DMAR_GSTS_REG);
569 if (gsts & DMA_GSTS_TES)
570 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
573 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
574 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
576 return container_of(dom, struct dmar_domain, domain);
579 static int __init intel_iommu_setup(char *str)
584 if (!strncmp(str, "on", 2)) {
586 pr_info("IOMMU enabled\n");
587 } else if (!strncmp(str, "off", 3)) {
589 pr_info("IOMMU disabled\n");
590 } else if (!strncmp(str, "igfx_off", 8)) {
592 pr_info("Disable GFX device mapping\n");
593 } else if (!strncmp(str, "forcedac", 8)) {
594 pr_info("Forcing DAC for PCI devices\n");
596 } else if (!strncmp(str, "strict", 6)) {
597 pr_info("Disable batched IOTLB flush\n");
598 intel_iommu_strict = 1;
599 } else if (!strncmp(str, "sp_off", 6)) {
600 pr_info("Disable supported super page\n");
601 intel_iommu_superpage = 0;
602 } else if (!strncmp(str, "ecs_off", 7)) {
604 "Intel-IOMMU: disable extended context table support\n");
606 } else if (!strncmp(str, "pasid28", 7)) {
608 "Intel-IOMMU: enable pre-production PASID support\n");
609 intel_iommu_pasid28 = 1;
610 iommu_identity_mapping |= IDENTMAP_GFX;
611 } else if (!strncmp(str, "tboot_noforce", 13)) {
613 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
614 intel_iommu_tboot_noforce = 1;
617 str += strcspn(str, ",");
623 __setup("intel_iommu=", intel_iommu_setup);
625 static struct kmem_cache *iommu_domain_cache;
626 static struct kmem_cache *iommu_devinfo_cache;
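/*
 * iommu->domains is effectively a two-level table: the top level is indexed
 * by the high byte of the domain id, and each lazily allocated second-level
 * chunk holds 256 dmar_domain pointers indexed by (did & 0xff); see
 * set_iommu_domain() below.
 */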
628 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
630 struct dmar_domain **domains;
633 domains = iommu->domains[idx];
637 return domains[did & 0xff];
640 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
641 struct dmar_domain *domain)
643 struct dmar_domain **domains;
646 if (!iommu->domains[idx]) {
647 size_t size = 256 * sizeof(struct dmar_domain *);
648 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
651 domains = iommu->domains[idx];
652 if (WARN_ON(!domains))
655 domains[did & 0xff] = domain;
658 static inline void *alloc_pgtable_page(int node)
663 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
665 vaddr = page_address(page);
669 static inline void free_pgtable_page(void *vaddr)
671 free_page((unsigned long)vaddr);
674 static inline void *alloc_domain_mem(void)
676 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
679 static void free_domain_mem(void *vaddr)
681 kmem_cache_free(iommu_domain_cache, vaddr);
684 static inline void * alloc_devinfo_mem(void)
686 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
689 static inline void free_devinfo_mem(void *vaddr)
691 kmem_cache_free(iommu_devinfo_cache, vaddr);
694 static inline int domain_type_is_vm(struct dmar_domain *domain)
696 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
699 static inline int domain_type_is_si(struct dmar_domain *domain)
701 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
704 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
706 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
707 DOMAIN_FLAG_STATIC_IDENTITY);
710 static inline int domain_pfn_supported(struct dmar_domain *domain,
713 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
715 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
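/*
 * For example, a domain with agaw == 2 has addr_width == 48 - 12 == 36, so
 * only pfns that fit in 36 bits are reported as supported.
 */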
718 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
723 sagaw = cap_sagaw(iommu->cap);
724 for (agaw = width_to_agaw(max_gaw);
726 if (test_bit(agaw, &sagaw))
734 * Calculate max SAGAW for each iommu.
736 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
738 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
742 * Calculate agaw for each iommu.
743 * "SAGAW" may be different across iommus; use a default agaw, and
744 * get a supported smaller agaw for iommus that don't support the default agaw.
746 int iommu_calculate_agaw(struct intel_iommu *iommu)
748 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
751 /* This function only returns a single iommu in a domain */
752 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
756 /* si_domain and vm domain should not get here. */
757 BUG_ON(domain_type_is_vm_or_si(domain));
758 for_each_domain_iommu(iommu_id, domain)
761 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
764 return g_iommus[iommu_id];
767 static void domain_update_iommu_coherency(struct dmar_domain *domain)
769 struct dmar_drhd_unit *drhd;
770 struct intel_iommu *iommu;
774 domain->iommu_coherency = 1;
776 for_each_domain_iommu(i, domain) {
778 if (!ecap_coherent(g_iommus[i]->ecap)) {
779 domain->iommu_coherency = 0;
786 /* No hardware attached; use lowest common denominator */
788 for_each_active_iommu(iommu, drhd) {
789 if (!ecap_coherent(iommu->ecap)) {
790 domain->iommu_coherency = 0;
797 static int domain_update_iommu_snooping(struct intel_iommu *skip)
799 struct dmar_drhd_unit *drhd;
800 struct intel_iommu *iommu;
804 for_each_active_iommu(iommu, drhd) {
806 if (!ecap_sc_support(iommu->ecap)) {
817 static int domain_update_iommu_superpage(struct intel_iommu *skip)
819 struct dmar_drhd_unit *drhd;
820 struct intel_iommu *iommu;
823 if (!intel_iommu_superpage) {
827 /* set iommu_superpage to the smallest common denominator */
829 for_each_active_iommu(iommu, drhd) {
831 mask &= cap_super_page_val(iommu->cap);
841 /* Some capabilities may be different across iommus */
842 static void domain_update_iommu_cap(struct dmar_domain *domain)
844 domain_update_iommu_coherency(domain);
845 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
846 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
849 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
850 u8 bus, u8 devfn, int alloc)
852 struct root_entry *root = &iommu->root_entry[bus];
853 struct context_entry *context;
857 if (ecs_enabled(iommu)) {
865 context = phys_to_virt(*entry & VTD_PAGE_MASK);
867 unsigned long phy_addr;
871 context = alloc_pgtable_page(iommu->node);
875 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
876 phy_addr = virt_to_phys((void *)context);
877 *entry = phy_addr | 1;
878 __iommu_flush_cache(iommu, entry, sizeof(*entry));
880 return &context[devfn];
883 static int iommu_dummy(struct device *dev)
885 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
888 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
890 struct dmar_drhd_unit *drhd = NULL;
891 struct intel_iommu *iommu;
893 struct pci_dev *ptmp, *pdev = NULL;
897 if (iommu_dummy(dev))
900 if (dev_is_pci(dev)) {
901 struct pci_dev *pf_pdev;
903 pdev = to_pci_dev(dev);
904 /* VFs aren't listed in scope tables; we need to look up
905 * the PF instead to find the IOMMU. */
906 pf_pdev = pci_physfn(pdev);
908 segment = pci_domain_nr(pdev->bus);
909 } else if (has_acpi_companion(dev))
910 dev = &ACPI_COMPANION(dev)->dev;
913 for_each_active_iommu(iommu, drhd) {
914 if (pdev && segment != drhd->segment)
917 for_each_active_dev_scope(drhd->devices,
918 drhd->devices_cnt, i, tmp) {
920 /* For a VF use its original BDF# not that of the PF
921 * which we used for the IOMMU lookup. Strictly speaking
922 * we could do this for all PCI devices; we only need to
923 * get the BDF# from the scope table for ACPI matches. */
924 if (pdev && pdev->is_virtfn)
927 *bus = drhd->devices[i].bus;
928 *devfn = drhd->devices[i].devfn;
932 if (!pdev || !dev_is_pci(tmp))
935 ptmp = to_pci_dev(tmp);
936 if (ptmp->subordinate &&
937 ptmp->subordinate->number <= pdev->bus->number &&
938 ptmp->subordinate->busn_res.end >= pdev->bus->number)
942 if (pdev && drhd->include_all) {
944 *bus = pdev->bus->number;
945 *devfn = pdev->devfn;
956 static void domain_flush_cache(struct dmar_domain *domain,
957 void *addr, int size)
959 if (!domain->iommu_coherency)
960 clflush_cache_range(addr, size);
963 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
965 struct context_entry *context;
969 spin_lock_irqsave(&iommu->lock, flags);
970 context = iommu_context_addr(iommu, bus, devfn, 0);
972 ret = context_present(context);
973 spin_unlock_irqrestore(&iommu->lock, flags);
977 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
979 struct context_entry *context;
982 spin_lock_irqsave(&iommu->lock, flags);
983 context = iommu_context_addr(iommu, bus, devfn, 0);
985 context_clear_entry(context);
986 __iommu_flush_cache(iommu, context, sizeof(*context));
988 spin_unlock_irqrestore(&iommu->lock, flags);
991 static void free_context_table(struct intel_iommu *iommu)
995 struct context_entry *context;
997 spin_lock_irqsave(&iommu->lock, flags);
998 if (!iommu->root_entry) {
1001 for (i = 0; i < ROOT_ENTRY_NR; i++) {
1002 context = iommu_context_addr(iommu, i, 0, 0);
1004 free_pgtable_page(context);
1006 if (!ecs_enabled(iommu))
1009 context = iommu_context_addr(iommu, i, 0x80, 0);
1011 free_pgtable_page(context);
1014 free_pgtable_page(iommu->root_entry);
1015 iommu->root_entry = NULL;
1017 spin_unlock_irqrestore(&iommu->lock, flags);
1020 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
1021 unsigned long pfn, int *target_level)
1023 struct dma_pte *parent, *pte = NULL;
1024 int level = agaw_to_level(domain->agaw);
1027 BUG_ON(!domain->pgd);
1029 if (!domain_pfn_supported(domain, pfn))
1030 /* Address beyond IOMMU's addressing capabilities. */
1033 parent = domain->pgd;
1038 offset = pfn_level_offset(pfn, level);
1039 pte = &parent[offset];
1040 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1042 if (level == *target_level)
1045 if (!dma_pte_present(pte)) {
1048 tmp_page = alloc_pgtable_page(domain->nid);
1053 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1054 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1055 if (cmpxchg64(&pte->val, 0ULL, pteval))
1056 /* Someone else set it while we were thinking; use theirs. */
1057 free_pgtable_page(tmp_page);
1059 domain_flush_cache(domain, pte, sizeof(*pte));
1064 parent = phys_to_virt(dma_pte_addr(pte));
1069 *target_level = level;
1075 /* return address's pte at specific level */
1076 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1078 int level, int *large_page)
1080 struct dma_pte *parent, *pte = NULL;
1081 int total = agaw_to_level(domain->agaw);
1084 parent = domain->pgd;
1085 while (level <= total) {
1086 offset = pfn_level_offset(pfn, total);
1087 pte = &parent[offset];
1091 if (!dma_pte_present(pte)) {
1092 *large_page = total;
1096 if (dma_pte_superpage(pte)) {
1097 *large_page = total;
1101 parent = phys_to_virt(dma_pte_addr(pte));
1107 /* clear last level pte; a tlb flush should follow */
1108 static void dma_pte_clear_range(struct dmar_domain *domain,
1109 unsigned long start_pfn,
1110 unsigned long last_pfn)
1112 unsigned int large_page = 1;
1113 struct dma_pte *first_pte, *pte;
1115 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1116 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1117 BUG_ON(start_pfn > last_pfn);
1119 /* we don't need lock here; nobody else touches the iova range */
1122 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1124 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1129 start_pfn += lvl_to_nr_pages(large_page);
1131 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1133 domain_flush_cache(domain, first_pte,
1134 (void *)pte - (void *)first_pte);
1136 } while (start_pfn && start_pfn <= last_pfn);
1139 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1140 int retain_level, struct dma_pte *pte,
1141 unsigned long pfn, unsigned long start_pfn,
1142 unsigned long last_pfn)
1144 pfn = max(start_pfn, pfn);
1145 pte = &pte[pfn_level_offset(pfn, level)];
1148 unsigned long level_pfn;
1149 struct dma_pte *level_pte;
1151 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1154 level_pfn = pfn & level_mask(level);
1155 level_pte = phys_to_virt(dma_pte_addr(pte));
1158 dma_pte_free_level(domain, level - 1, retain_level,
1159 level_pte, level_pfn, start_pfn,
1164 * Free the page table if we're below the level we want to
1165 * retain and the range covers the entire table.
1167 if (level < retain_level && !(start_pfn > level_pfn ||
1168 last_pfn < level_pfn + level_size(level) - 1)) {
1170 domain_flush_cache(domain, pte, sizeof(*pte));
1171 free_pgtable_page(level_pte);
1174 pfn += level_size(level);
1175 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1179 * clear last level (leaf) ptes and free page table pages below the
1180 * level we wish to keep intact.
1182 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1183 unsigned long start_pfn,
1184 unsigned long last_pfn,
1187 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1188 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1189 BUG_ON(start_pfn > last_pfn);
1191 dma_pte_clear_range(domain, start_pfn, last_pfn);
1193 /* We don't need lock here; nobody else touches the iova range */
1194 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1195 domain->pgd, 0, start_pfn, last_pfn);
1198 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1199 free_pgtable_page(domain->pgd);
1204 /* When a page at a given level is being unlinked from its parent, we don't
1205 need to *modify* it at all. All we need to do is make a list of all the
1206 pages which can be freed just as soon as we've flushed the IOTLB and we
1207 know the hardware page-walk will no longer touch them.
1208 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1210 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1211 int level, struct dma_pte *pte,
1212 struct page *freelist)
1216 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1217 pg->freelist = freelist;
1223 pte = page_address(pg);
1225 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1226 freelist = dma_pte_list_pagetables(domain, level - 1,
1229 } while (!first_pte_in_page(pte));
1234 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1235 struct dma_pte *pte, unsigned long pfn,
1236 unsigned long start_pfn,
1237 unsigned long last_pfn,
1238 struct page *freelist)
1240 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1242 pfn = max(start_pfn, pfn);
1243 pte = &pte[pfn_level_offset(pfn, level)];
1246 unsigned long level_pfn;
1248 if (!dma_pte_present(pte))
1251 level_pfn = pfn & level_mask(level);
1253 /* If range covers entire pagetable, free it */
1254 if (start_pfn <= level_pfn &&
1255 last_pfn >= level_pfn + level_size(level) - 1) {
1256 /* These subordinate page tables are going away entirely. Don't
1257 bother to clear them; we're just going to *free* them. */
1258 if (level > 1 && !dma_pte_superpage(pte))
1259 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1265 } else if (level > 1) {
1266 /* Recurse down into a level that isn't *entirely* obsolete */
1267 freelist = dma_pte_clear_level(domain, level - 1,
1268 phys_to_virt(dma_pte_addr(pte)),
1269 level_pfn, start_pfn, last_pfn,
1273 pfn += level_size(level);
1274 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1277 domain_flush_cache(domain, first_pte,
1278 (void *)++last_pte - (void *)first_pte);
1283 /* We can't just free the pages because the IOMMU may still be walking
1284 the page tables, and may have cached the intermediate levels. The
1285 pages can only be freed after the IOTLB flush has been done. */
1286 static struct page *domain_unmap(struct dmar_domain *domain,
1287 unsigned long start_pfn,
1288 unsigned long last_pfn)
1290 struct page *freelist = NULL;
1292 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1293 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1294 BUG_ON(start_pfn > last_pfn);
1296 /* we don't need lock here; nobody else touches the iova range */
1297 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1298 domain->pgd, 0, start_pfn, last_pfn, NULL);
1301 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1302 struct page *pgd_page = virt_to_page(domain->pgd);
1303 pgd_page->freelist = freelist;
1304 freelist = pgd_page;
1312 static void dma_free_pagelist(struct page *freelist)
1316 while ((pg = freelist)) {
1317 freelist = pg->freelist;
1318 free_pgtable_page(page_address(pg));
1322 /* iommu handling */
1323 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1325 struct root_entry *root;
1326 unsigned long flags;
1328 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1330 pr_err("Allocating root entry for %s failed\n",
1335 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1337 spin_lock_irqsave(&iommu->lock, flags);
1338 iommu->root_entry = root;
1339 spin_unlock_irqrestore(&iommu->lock, flags);
1344 static void iommu_set_root_entry(struct intel_iommu *iommu)
1350 addr = virt_to_phys(iommu->root_entry);
1351 if (ecs_enabled(iommu))
1352 addr |= DMA_RTADDR_RTT;
1354 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1355 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1357 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1359 /* Make sure the hardware completes it */
1360 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1361 readl, (sts & DMA_GSTS_RTPS), sts);
1363 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1366 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1371 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1374 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1375 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1377 /* Make sure the hardware completes it */
1378 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1379 readl, (!(val & DMA_GSTS_WBFS)), val);
1381 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1384 /* return value determines whether we need a write buffer flush */
1385 static void __iommu_flush_context(struct intel_iommu *iommu,
1386 u16 did, u16 source_id, u8 function_mask,
1393 case DMA_CCMD_GLOBAL_INVL:
1394 val = DMA_CCMD_GLOBAL_INVL;
1396 case DMA_CCMD_DOMAIN_INVL:
1397 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1399 case DMA_CCMD_DEVICE_INVL:
1400 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1401 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1406 val |= DMA_CCMD_ICC;
1408 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1409 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1411 /* Make sure the hardware completes it */
1412 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1413 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1415 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1418 /* return value determines whether we need a write buffer flush */
1419 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1420 u64 addr, unsigned int size_order, u64 type)
1422 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1423 u64 val = 0, val_iva = 0;
1427 case DMA_TLB_GLOBAL_FLUSH:
1428 /* global flush doesn't need to set IVA_REG */
1429 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1431 case DMA_TLB_DSI_FLUSH:
1432 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1434 case DMA_TLB_PSI_FLUSH:
1435 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1436 /* IH bit is passed in as part of address */
1437 val_iva = size_order | addr;
1442 /* Note: set drain read/write */
1445 * This is probably only there to be extra safe; it looks like we can
1446 * ignore it without any impact.
1448 if (cap_read_drain(iommu->cap))
1449 val |= DMA_TLB_READ_DRAIN;
1451 if (cap_write_drain(iommu->cap))
1452 val |= DMA_TLB_WRITE_DRAIN;
1454 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1455 /* Note: Only uses first TLB reg currently */
1457 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1458 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1460 /* Make sure the hardware completes it */
1461 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1462 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1464 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1466 /* check IOTLB invalidation granularity */
1467 if (DMA_TLB_IAIG(val) == 0)
1468 pr_err("Flush IOTLB failed\n");
1469 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1470 pr_debug("TLB flush request %Lx, actual %Lx\n",
1471 (unsigned long long)DMA_TLB_IIRG(type),
1472 (unsigned long long)DMA_TLB_IAIG(val));
1475 static struct device_domain_info *
1476 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1479 struct device_domain_info *info;
1481 assert_spin_locked(&device_domain_lock);
1486 list_for_each_entry(info, &domain->devices, link)
1487 if (info->iommu == iommu && info->bus == bus &&
1488 info->devfn == devfn) {
1489 if (info->ats_supported && info->dev)
1497 static void domain_update_iotlb(struct dmar_domain *domain)
1499 struct device_domain_info *info;
1500 bool has_iotlb_device = false;
1502 assert_spin_locked(&device_domain_lock);
1504 list_for_each_entry(info, &domain->devices, link) {
1505 struct pci_dev *pdev;
1507 if (!info->dev || !dev_is_pci(info->dev))
1510 pdev = to_pci_dev(info->dev);
1511 if (pdev->ats_enabled) {
1512 has_iotlb_device = true;
1517 domain->has_iotlb_device = has_iotlb_device;
1520 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1522 struct pci_dev *pdev;
1524 assert_spin_locked(&device_domain_lock);
1526 if (!info || !dev_is_pci(info->dev))
1529 pdev = to_pci_dev(info->dev);
1531 #ifdef CONFIG_INTEL_IOMMU_SVM
1532 /* The PCIe spec, in its wisdom, declares that the behaviour of
1533 the device if you enable PASID support after ATS support is
1534 undefined. So always enable PASID support on devices which
1535 have it, even if we can't yet know if we're ever going to use it. */
1537 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1538 info->pasid_enabled = 1;
1540 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1541 info->pri_enabled = 1;
1543 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1544 info->ats_enabled = 1;
1545 domain_update_iotlb(info->domain);
1546 info->ats_qdep = pci_ats_queue_depth(pdev);
1550 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1552 struct pci_dev *pdev;
1554 assert_spin_locked(&device_domain_lock);
1556 if (!dev_is_pci(info->dev))
1559 pdev = to_pci_dev(info->dev);
1561 if (info->ats_enabled) {
1562 pci_disable_ats(pdev);
1563 info->ats_enabled = 0;
1564 domain_update_iotlb(info->domain);
1566 #ifdef CONFIG_INTEL_IOMMU_SVM
1567 if (info->pri_enabled) {
1568 pci_disable_pri(pdev);
1569 info->pri_enabled = 0;
1571 if (info->pasid_enabled) {
1572 pci_disable_pasid(pdev);
1573 info->pasid_enabled = 0;
1578 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1579 u64 addr, unsigned mask)
1582 unsigned long flags;
1583 struct device_domain_info *info;
1585 if (!domain->has_iotlb_device)
1588 spin_lock_irqsave(&device_domain_lock, flags);
1589 list_for_each_entry(info, &domain->devices, link) {
1590 if (!info->ats_enabled)
1593 sid = info->bus << 8 | info->devfn;
1594 qdep = info->ats_qdep;
1595 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1597 spin_unlock_irqrestore(&device_domain_lock, flags);
1600 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1601 struct dmar_domain *domain,
1602 unsigned long pfn, unsigned int pages,
1605 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1606 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1607 u16 did = domain->iommu_did[iommu->seq_id];
1614 * Fallback to domain selective flush if no PSI support or the size is
1615 * too big.
1616 * PSI requires the page size to be a power of two, and the base address
1617 * to be naturally aligned to that size.
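 * For example, flushing 3 pages rounds up to mask == ilog2(4) == 2, i.e. a
 * 4-page (16KiB) naturally aligned invalidation.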
1619 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1620 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1623 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1627 * In caching mode, changes of pages from non-present to present require
1628 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1630 if (!cap_caching_mode(iommu->cap) || !map)
1631 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1635 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1638 unsigned long flags;
1640 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1641 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1642 pmen &= ~DMA_PMEN_EPM;
1643 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1645 /* wait for the protected region status bit to clear */
1646 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1647 readl, !(pmen & DMA_PMEN_PRS), pmen);
1649 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1652 static void iommu_enable_translation(struct intel_iommu *iommu)
1655 unsigned long flags;
1657 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1658 iommu->gcmd |= DMA_GCMD_TE;
1659 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1661 /* Make sure the hardware completes it */
1662 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1663 readl, (sts & DMA_GSTS_TES), sts);
1665 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1668 static void iommu_disable_translation(struct intel_iommu *iommu)
1673 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1674 iommu->gcmd &= ~DMA_GCMD_TE;
1675 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1677 /* Make sure the hardware completes it */
1678 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1679 readl, (!(sts & DMA_GSTS_TES)), sts);
1681 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1685 static int iommu_init_domains(struct intel_iommu *iommu)
1687 u32 ndomains, nlongs;
1690 ndomains = cap_ndoms(iommu->cap);
1691 pr_debug("%s: Number of Domains supported <%d>\n",
1692 iommu->name, ndomains);
1693 nlongs = BITS_TO_LONGS(ndomains);
1695 spin_lock_init(&iommu->lock);
1697 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1698 if (!iommu->domain_ids) {
1699 pr_err("%s: Allocating domain id array failed\n",
1704 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1705 iommu->domains = kzalloc(size, GFP_KERNEL);
1707 if (iommu->domains) {
1708 size = 256 * sizeof(struct dmar_domain *);
1709 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1712 if (!iommu->domains || !iommu->domains[0]) {
1713 pr_err("%s: Allocating domain array failed\n",
1715 kfree(iommu->domain_ids);
1716 kfree(iommu->domains);
1717 iommu->domain_ids = NULL;
1718 iommu->domains = NULL;
1725 * If Caching mode is set, then invalid translations are tagged
1726 * with domain-id 0, hence we need to pre-allocate it. We also
1727 * use domain-id 0 as a marker for non-allocated domain-id, so
1728 * make sure it is not used for a real domain.
1730 set_bit(0, iommu->domain_ids);
1735 static void disable_dmar_iommu(struct intel_iommu *iommu)
1737 struct device_domain_info *info, *tmp;
1738 unsigned long flags;
1740 if (!iommu->domains || !iommu->domain_ids)
1744 spin_lock_irqsave(&device_domain_lock, flags);
1745 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1746 struct dmar_domain *domain;
1748 if (info->iommu != iommu)
1751 if (!info->dev || !info->domain)
1754 domain = info->domain;
1756 __dmar_remove_one_dev_info(info);
1758 if (!domain_type_is_vm_or_si(domain)) {
1760 * The domain_exit() function can't be called under
1761 * device_domain_lock, as it takes this lock itself.
1762 * So release the lock here and re-run the loop
1765 spin_unlock_irqrestore(&device_domain_lock, flags);
1766 domain_exit(domain);
1770 spin_unlock_irqrestore(&device_domain_lock, flags);
1772 if (iommu->gcmd & DMA_GCMD_TE)
1773 iommu_disable_translation(iommu);
1776 static void free_dmar_iommu(struct intel_iommu *iommu)
1778 if ((iommu->domains) && (iommu->domain_ids)) {
1779 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1782 for (i = 0; i < elems; i++)
1783 kfree(iommu->domains[i]);
1784 kfree(iommu->domains);
1785 kfree(iommu->domain_ids);
1786 iommu->domains = NULL;
1787 iommu->domain_ids = NULL;
1790 g_iommus[iommu->seq_id] = NULL;
1792 /* free context mapping */
1793 free_context_table(iommu);
1795 #ifdef CONFIG_INTEL_IOMMU_SVM
1796 if (pasid_enabled(iommu)) {
1797 if (ecap_prs(iommu->ecap))
1798 intel_svm_finish_prq(iommu);
1799 intel_svm_free_pasid_tables(iommu);
1804 static struct dmar_domain *alloc_domain(int flags)
1806 struct dmar_domain *domain;
1808 domain = alloc_domain_mem();
1812 memset(domain, 0, sizeof(*domain));
1814 domain->flags = flags;
1815 domain->has_iotlb_device = false;
1816 INIT_LIST_HEAD(&domain->devices);
1821 /* Must be called with iommu->lock */
1822 static int domain_attach_iommu(struct dmar_domain *domain,
1823 struct intel_iommu *iommu)
1825 unsigned long ndomains;
1828 assert_spin_locked(&device_domain_lock);
1829 assert_spin_locked(&iommu->lock);
1831 domain->iommu_refcnt[iommu->seq_id] += 1;
1832 domain->iommu_count += 1;
1833 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1834 ndomains = cap_ndoms(iommu->cap);
1835 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1837 if (num >= ndomains) {
1838 pr_err("%s: No free domain ids\n", iommu->name);
1839 domain->iommu_refcnt[iommu->seq_id] -= 1;
1840 domain->iommu_count -= 1;
1844 set_bit(num, iommu->domain_ids);
1845 set_iommu_domain(iommu, num, domain);
1847 domain->iommu_did[iommu->seq_id] = num;
1848 domain->nid = iommu->node;
1850 domain_update_iommu_cap(domain);
1856 static int domain_detach_iommu(struct dmar_domain *domain,
1857 struct intel_iommu *iommu)
1859 int num, count = INT_MAX;
1861 assert_spin_locked(&device_domain_lock);
1862 assert_spin_locked(&iommu->lock);
1864 domain->iommu_refcnt[iommu->seq_id] -= 1;
1865 count = --domain->iommu_count;
1866 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1867 num = domain->iommu_did[iommu->seq_id];
1868 clear_bit(num, iommu->domain_ids);
1869 set_iommu_domain(iommu, num, NULL);
1871 domain_update_iommu_cap(domain);
1872 domain->iommu_did[iommu->seq_id] = 0;
1878 static struct iova_domain reserved_iova_list;
1879 static struct lock_class_key reserved_rbtree_key;
1881 static int dmar_init_reserved_ranges(void)
1883 struct pci_dev *pdev = NULL;
1887 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1890 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1891 &reserved_rbtree_key);
1893 /* IOAPIC ranges shouldn't be accessed by DMA */
1894 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1895 IOVA_PFN(IOAPIC_RANGE_END));
1897 pr_err("Reserve IOAPIC range failed\n");
1901 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1902 for_each_pci_dev(pdev) {
1905 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1906 r = &pdev->resource[i];
1907 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1909 iova = reserve_iova(&reserved_iova_list,
1913 pr_err("Reserve iova failed\n");
1921 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1923 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1926 static inline int guestwidth_to_adjustwidth(int gaw)
1929 int r = (gaw - 12) % 9;
1940 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1943 int adjust_width, agaw;
1944 unsigned long sagaw;
1946 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1948 domain_reserve_special_ranges(domain);
1950 /* calculate AGAW */
1951 if (guest_width > cap_mgaw(iommu->cap))
1952 guest_width = cap_mgaw(iommu->cap);
1953 domain->gaw = guest_width;
1954 adjust_width = guestwidth_to_adjustwidth(guest_width);
1955 agaw = width_to_agaw(adjust_width);
1956 sagaw = cap_sagaw(iommu->cap);
1957 if (!test_bit(agaw, &sagaw)) {
1958 /* hardware doesn't support it, choose a bigger one */
1959 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1960 agaw = find_next_bit(&sagaw, 5, agaw);
1964 domain->agaw = agaw;
1966 if (ecap_coherent(iommu->ecap))
1967 domain->iommu_coherency = 1;
1969 domain->iommu_coherency = 0;
1971 if (ecap_sc_support(iommu->ecap))
1972 domain->iommu_snooping = 1;
1974 domain->iommu_snooping = 0;
1976 if (intel_iommu_superpage)
1977 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1979 domain->iommu_superpage = 0;
1981 domain->nid = iommu->node;
1983 /* always allocate the top pgd */
1984 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1987 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1991 static void domain_exit(struct dmar_domain *domain)
1993 struct page *freelist = NULL;
1995 /* Domain 0 is reserved, so don't process it */
1999 /* Flush any lazy unmaps that may reference this domain */
2000 if (!intel_iommu_strict) {
2003 for_each_possible_cpu(cpu)
2004 flush_unmaps_timeout(cpu);
2007 /* Remove associated devices and clear attached or cached domains */
2009 domain_remove_dev_info(domain);
2013 put_iova_domain(&domain->iovad);
2015 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
2017 dma_free_pagelist(freelist);
2019 free_domain_mem(domain);
2022 static int domain_context_mapping_one(struct dmar_domain *domain,
2023 struct intel_iommu *iommu,
2026 u16 did = domain->iommu_did[iommu->seq_id];
2027 int translation = CONTEXT_TT_MULTI_LEVEL;
2028 struct device_domain_info *info = NULL;
2029 struct context_entry *context;
2030 unsigned long flags;
2031 struct dma_pte *pgd;
2036 if (hw_pass_through && domain_type_is_si(domain))
2037 translation = CONTEXT_TT_PASS_THROUGH;
2039 pr_debug("Set context mapping for %02x:%02x.%d\n",
2040 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2042 BUG_ON(!domain->pgd);
2044 spin_lock_irqsave(&device_domain_lock, flags);
2045 spin_lock(&iommu->lock);
2048 context = iommu_context_addr(iommu, bus, devfn, 1);
2053 if (context_present(context))
2057 * For kdump cases, old valid entries may be cached due to the
2058 * in-flight DMA and copied pgtable, but there is no unmapping
2059 * behaviour for them, thus we need an explicit cache flush for
2060 * the newly-mapped device. For kdump, at this point, the device
2061 * is supposed to finish reset at its driver probe stage, so no
2062 * in-flight DMA will exist, and we don't need to worry anymore
2065 if (context_copied(context)) {
2066 u16 did_old = context_domain_id(context);
2068 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
2069 iommu->flush.flush_context(iommu, did_old,
2070 (((u16)bus) << 8) | devfn,
2071 DMA_CCMD_MASK_NOBIT,
2072 DMA_CCMD_DEVICE_INVL);
2077 context_clear_entry(context);
2078 context_set_domain_id(context, did);
2081 * Skip top levels of page tables for iommus which have a smaller agaw
2082 * than the default. Unnecessary for PT mode.
2084 if (translation != CONTEXT_TT_PASS_THROUGH) {
2085 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
2087 pgd = phys_to_virt(dma_pte_addr(pgd));
2088 if (!dma_pte_present(pgd))
2092 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2093 if (info && info->ats_supported)
2094 translation = CONTEXT_TT_DEV_IOTLB;
2096 translation = CONTEXT_TT_MULTI_LEVEL;
2098 context_set_address_root(context, virt_to_phys(pgd));
2099 context_set_address_width(context, iommu->agaw);
2102 * In pass through mode, AW must be programmed to
2103 * indicate the largest AGAW value supported by
2104 * hardware. And ASR is ignored by hardware.
2106 context_set_address_width(context, iommu->msagaw);
2109 context_set_translation_type(context, translation);
2110 context_set_fault_enable(context);
2111 context_set_present(context);
2112 domain_flush_cache(domain, context, sizeof(*context));
2115 * It's a non-present to present mapping. If the hardware doesn't cache
2116 * non-present entries we only need to flush the write-buffer. If it
2117 * _does_ cache non-present entries, then it does so in the special
2118 * domain #0, which we have to flush:
2120 if (cap_caching_mode(iommu->cap)) {
2121 iommu->flush.flush_context(iommu, 0,
2122 (((u16)bus) << 8) | devfn,
2123 DMA_CCMD_MASK_NOBIT,
2124 DMA_CCMD_DEVICE_INVL);
2125 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2127 iommu_flush_write_buffer(iommu);
2129 iommu_enable_dev_iotlb(info);
2134 spin_unlock(&iommu->lock);
2135 spin_unlock_irqrestore(&device_domain_lock, flags);
2140 struct domain_context_mapping_data {
2141 struct dmar_domain *domain;
2142 struct intel_iommu *iommu;
2145 static int domain_context_mapping_cb(struct pci_dev *pdev,
2146 u16 alias, void *opaque)
2148 struct domain_context_mapping_data *data = opaque;
2150 return domain_context_mapping_one(data->domain, data->iommu,
2151 PCI_BUS_NUM(alias), alias & 0xff);
2155 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2157 struct intel_iommu *iommu;
2159 struct domain_context_mapping_data data;
2161 iommu = device_to_iommu(dev, &bus, &devfn);
2165 if (!dev_is_pci(dev))
2166 return domain_context_mapping_one(domain, iommu, bus, devfn);
2168 data.domain = domain;
2171 return pci_for_each_dma_alias(to_pci_dev(dev),
2172 &domain_context_mapping_cb, &data);
2175 static int domain_context_mapped_cb(struct pci_dev *pdev,
2176 u16 alias, void *opaque)
2178 struct intel_iommu *iommu = opaque;
2180 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2183 static int domain_context_mapped(struct device *dev)
2185 struct intel_iommu *iommu;
2188 iommu = device_to_iommu(dev, &bus, &devfn);
2192 if (!dev_is_pci(dev))
2193 return device_context_mapped(iommu, bus, devfn);
2195 return !pci_for_each_dma_alias(to_pci_dev(dev),
2196 domain_context_mapped_cb, iommu);
2199 /* Returns a number of VTD pages, but aligned to MM page size */
2200 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2203 host_addr &= ~PAGE_MASK;
2204 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
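/*
 * Example: with 4KiB MM pages, a 0x1000-byte buffer starting 0x800 bytes
 * into a page straddles a page boundary, so
 * PAGE_ALIGN(0x800 + 0x1000) >> VTD_PAGE_SHIFT == 2 VT-d pages are needed.
 */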
2207 /* Return largest possible superpage level for a given mapping */
2208 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2209 unsigned long iov_pfn,
2210 unsigned long phy_pfn,
2211 unsigned long pages)
2213 int support, level = 1;
2214 unsigned long pfnmerge;
2216 support = domain->iommu_superpage;
2218 /* To use a large page, the virtual *and* physical addresses
2219 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2220 of them will mean we have to use smaller pages. So just
2221 merge them and check both at once. */
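/*
 * For example, iov_pfn 0x200 and phy_pfn 0x5400 both have their low nine
 * bits clear, so a 2MiB superpage is possible provided at least 512 pages
 * are being mapped.
 */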
2222 pfnmerge = iov_pfn | phy_pfn;
2224 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2225 pages >>= VTD_STRIDE_SHIFT;
2228 pfnmerge >>= VTD_STRIDE_SHIFT;
2235 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2236 struct scatterlist *sg, unsigned long phys_pfn,
2237 unsigned long nr_pages, int prot)
2239 struct dma_pte *first_pte = NULL, *pte = NULL;
2240 phys_addr_t uninitialized_var(pteval);
2241 unsigned long sg_res = 0;
2242 unsigned int largepage_lvl = 0;
2243 unsigned long lvl_pages = 0;
2245 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2247 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2250 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2254 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2257 while (nr_pages > 0) {
2261 sg_res = aligned_nrpages(sg->offset, sg->length);
2262 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2263 sg->dma_length = sg->length;
2264 pteval = page_to_phys(sg_page(sg)) | prot;
2265 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2269 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2271 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2274 /* It is a large page */
2275 if (largepage_lvl > 1) {
2276 unsigned long nr_superpages, end_pfn;
2278 pteval |= DMA_PTE_LARGE_PAGE;
2279 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2281 nr_superpages = sg_res / lvl_pages;
2282 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
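/*
 * For example, an sg entry of 1024 pages at the 2MiB level (lvl_pages ==
 * 512) spans nr_superpages == 2 superpages, so the 4KiB-level tables under
 * [iov_pfn, iov_pfn + 1023] are freed below before the large PTEs are set.
 */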
2285 * Ensure that old small page tables are
2286 * removed to make room for superpage(s).
2287 * We're adding new large pages, so make sure
2288 * we don't remove their parent tables.
2290 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2293 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2297 /* We don't need lock here, nobody else
2298 * touches the iova range
2300 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2302 static int dumps = 5;
2303 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2304 iov_pfn, tmp, (unsigned long long)pteval);
2307 debug_dma_dump_mappings(NULL);
2312 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2314 BUG_ON(nr_pages < lvl_pages);
2315 BUG_ON(sg_res < lvl_pages);
2317 nr_pages -= lvl_pages;
2318 iov_pfn += lvl_pages;
2319 phys_pfn += lvl_pages;
2320 pteval += lvl_pages * VTD_PAGE_SIZE;
2321 sg_res -= lvl_pages;
2323 /* If the next PTE would be the first in a new page, then we
2324 need to flush the cache on the entries we've just written.
2325 And then we'll need to recalculate 'pte', so clear it and
2326 let it get set again in the if (!pte) block above.
2328 If we're done (!nr_pages) we need to flush the cache too.
2330 Also if we've been setting superpages, we may need to
2331 recalculate 'pte' and switch back to smaller pages for the
2332 end of the mapping, if the trailing size is not enough to
2333 use another superpage (i.e. sg_res < lvl_pages). */
2335 if (!nr_pages || first_pte_in_page(pte) ||
2336 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2337 domain_flush_cache(domain, first_pte,
2338 (void *)pte - (void *)first_pte);
2342 if (!sg_res && nr_pages)
2348 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2349 struct scatterlist *sg, unsigned long nr_pages,
2352 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2355 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2356 unsigned long phys_pfn, unsigned long nr_pages,
2359 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
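/*
 * Tear down the context entry for one bus/devfn and invalidate the
 * context and IOTLB caches so the hardware stops using it.
 */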
2362 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2367 clear_context_table(iommu, bus, devfn);
2368 iommu->flush.flush_context(iommu, 0, 0, 0,
2369 DMA_CCMD_GLOBAL_INVL);
2370 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2373 static inline void unlink_domain_info(struct device_domain_info *info)
2375 assert_spin_locked(&device_domain_lock);
2376 list_del(&info->link);
2377 list_del(&info->global);
2379 info->dev->archdata.iommu = NULL;
2382 static void domain_remove_dev_info(struct dmar_domain *domain)
2384 struct device_domain_info *info, *tmp;
2385 unsigned long flags;
2387 spin_lock_irqsave(&device_domain_lock, flags);
2388 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2389 __dmar_remove_one_dev_info(info);
2390 spin_unlock_irqrestore(&device_domain_lock, flags);
2395 * Note: we use struct device->archdata.iommu to store the info
2397 static struct dmar_domain *find_domain(struct device *dev)
2399 struct device_domain_info *info;
2401 /* No lock here, assumes no domain exit in normal case */
2402 info = dev->archdata.iommu;
2404 return info->domain;
2408 static inline struct device_domain_info *
2409 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2411 struct device_domain_info *info;
2413 list_for_each_entry(info, &device_domain_list, global)
2414 if (info->iommu->segment == segment && info->bus == bus &&
2415 info->devfn == devfn)
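/*
 * Allocate a device_domain_info for @dev, probe its ATS/PASID/PRI
 * capabilities, attach @domain to @iommu and set up the context mapping.
 * Returns the domain actually in use: if another thread won the race,
 * the pre-existing domain is returned and the caller must free the one
 * it passed in.
 */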
2421 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2424 struct dmar_domain *domain)
2426 struct dmar_domain *found = NULL;
2427 struct device_domain_info *info;
2428 unsigned long flags;
2431 info = alloc_devinfo_mem();
2436 info->devfn = devfn;
2437 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2438 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2441 info->domain = domain;
2442 info->iommu = iommu;
2444 if (dev && dev_is_pci(dev)) {
2445 struct pci_dev *pdev = to_pci_dev(info->dev);
2447 if (ecap_dev_iotlb_support(iommu->ecap) &&
2448 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2449 dmar_find_matched_atsr_unit(pdev))
2450 info->ats_supported = 1;
2452 if (ecs_enabled(iommu)) {
2453 if (pasid_enabled(iommu)) {
2454 int features = pci_pasid_features(pdev);
2456 info->pasid_supported = features | 1;
2459 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2460 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2461 info->pri_supported = 1;
2465 spin_lock_irqsave(&device_domain_lock, flags);
2467 found = find_domain(dev);
2470 struct device_domain_info *info2;
2471 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2473 found = info2->domain;
2479 spin_unlock_irqrestore(&device_domain_lock, flags);
2480 free_devinfo_mem(info);
2481 /* Caller must free the original domain */
2485 spin_lock(&iommu->lock);
2486 ret = domain_attach_iommu(domain, iommu);
2487 spin_unlock(&iommu->lock);
2490 spin_unlock_irqrestore(&device_domain_lock, flags);
2491 free_devinfo_mem(info);
2495 list_add(&info->link, &domain->devices);
2496 list_add(&info->global, &device_domain_list);
2498 dev->archdata.iommu = info;
2499 spin_unlock_irqrestore(&device_domain_lock, flags);
2501 if (dev && domain_context_mapping(domain, dev)) {
2502 pr_err("Domain context map for %s failed\n", dev_name(dev));
2503 dmar_remove_one_dev_info(domain, dev);
2510 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2512 *(u16 *)opaque = alias;
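/*
 * Find the domain already used by @dev's topmost DMA alias, or allocate
 * and initialize a fresh one with the requested guest address width.
 */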
2516 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2518 struct device_domain_info *info = NULL;
2519 struct dmar_domain *domain = NULL;
2520 struct intel_iommu *iommu;
2521 u16 req_id, dma_alias;
2522 unsigned long flags;
2525 iommu = device_to_iommu(dev, &bus, &devfn);
2529 req_id = ((u16)bus << 8) | devfn;
2531 if (dev_is_pci(dev)) {
2532 struct pci_dev *pdev = to_pci_dev(dev);
2534 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2536 spin_lock_irqsave(&device_domain_lock, flags);
2537 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2538 PCI_BUS_NUM(dma_alias),
2541 iommu = info->iommu;
2542 domain = info->domain;
2544 spin_unlock_irqrestore(&device_domain_lock, flags);
2546 /* DMA alias already has a domain, use it */
2551 /* Allocate and initialize new domain for the device */
2552 domain = alloc_domain(0);
2555 if (domain_init(domain, iommu, gaw)) {
2556 domain_exit(domain);
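/*
 * Bind @domain to @dev (and to its PCI DMA alias, if different) by
 * creating the device_domain_info entries.  Returns @domain on success;
 * on conflict or failure it returns whatever dmar_insert_one_dev_info()
 * handed back (a different domain or NULL).
 */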
2565 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2566 struct dmar_domain *domain)
2568 struct intel_iommu *iommu;
2569 struct dmar_domain *tmp;
2570 u16 req_id, dma_alias;
2573 iommu = device_to_iommu(dev, &bus, &devfn);
2577 req_id = ((u16)bus << 8) | devfn;
2579 if (dev_is_pci(dev)) {
2580 struct pci_dev *pdev = to_pci_dev(dev);
2582 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2584 /* register PCI DMA alias device */
2585 if (req_id != dma_alias) {
2586 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2587 dma_alias & 0xff, NULL, domain);
2589 if (!tmp || tmp != domain)
2594 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2595 if (!tmp || tmp != domain)
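/*
 * Convenience wrapper: return the domain @dev already has, or allocate
 * one via find_or_alloc_domain() and bind it with set_domain_for_dev().
 */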
2601 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2603 struct dmar_domain *domain, *tmp;
2605 domain = find_domain(dev);
2609 domain = find_or_alloc_domain(dev, gaw);
2613 tmp = set_domain_for_dev(dev, domain);
2614 if (!tmp || domain != tmp) {
2615 domain_exit(domain);
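/*
 * Reserve the IOVA range and install a 1:1 mapping (IOVA == physical
 * address) for [start, end] in @domain.
 */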
2624 static int iommu_domain_identity_map(struct dmar_domain *domain,
2625 unsigned long long start,
2626 unsigned long long end)
2628 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2629 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2631 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2632 dma_to_mm_pfn(last_vpfn))) {
2633 pr_err("Reserving iova failed\n");
2637 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2639 * The RMRR range might overlap with the physical memory range,
2642 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2644 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2645 last_vpfn - first_vpfn + 1,
2646 DMA_PTE_READ|DMA_PTE_WRITE);
2649 static int domain_prepare_identity_map(struct device *dev,
2650 struct dmar_domain *domain,
2651 unsigned long long start,
2652 unsigned long long end)
2654 /* For _hardware_ passthrough, don't bother. But for software
2655 passthrough, we do it anyway -- it may indicate a memory
2656 range which is reserved in E820, and so didn't get set
2657 up to start with in si_domain */
2658 if (domain == si_domain && hw_pass_through) {
2659 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2660 dev_name(dev), start, end);
2664 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2665 dev_name(dev), start, end);
2668 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2669 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2670 dmi_get_system_info(DMI_BIOS_VENDOR),
2671 dmi_get_system_info(DMI_BIOS_VERSION),
2672 dmi_get_system_info(DMI_PRODUCT_VERSION));
2676 if (end >> agaw_to_width(domain->agaw)) {
2677 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2678 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2679 agaw_to_width(domain->agaw),
2680 dmi_get_system_info(DMI_BIOS_VENDOR),
2681 dmi_get_system_info(DMI_BIOS_VERSION),
2682 dmi_get_system_info(DMI_PRODUCT_VERSION));
2686 return iommu_domain_identity_map(domain, start, end);
2689 static int iommu_prepare_identity_map(struct device *dev,
2690 unsigned long long start,
2691 unsigned long long end)
2693 struct dmar_domain *domain;
2696 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2700 ret = domain_prepare_identity_map(dev, domain, start, end);
2702 domain_exit(domain);
2707 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2710 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2712 return iommu_prepare_identity_map(dev, rmrr->base_address,
2716 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2717 static inline void iommu_prepare_isa(void)
2719 struct pci_dev *pdev;
2722 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2726 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2727 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2730 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2735 static inline void iommu_prepare_isa(void)
2739 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2741 static int md_domain_init(struct dmar_domain *domain, int guest_width);
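/*
 * Build the static identity (si) domain: a single domain that 1:1 maps
 * every usable memory range of every online node, used when devices are
 * kept in passthrough-style operation without hardware pass-through.
 */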
2743 static int __init si_domain_init(int hw)
2747 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2751 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2752 domain_exit(si_domain);
2756 pr_debug("Identity mapping domain allocated\n");
2761 for_each_online_node(nid) {
2762 unsigned long start_pfn, end_pfn;
2765 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2766 ret = iommu_domain_identity_map(si_domain,
2767 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2776 static int identity_mapping(struct device *dev)
2778 struct device_domain_info *info;
2780 if (likely(!iommu_identity_mapping))
2783 info = dev->archdata.iommu;
2784 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2785 return (info->domain == si_domain);
2790 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2792 struct dmar_domain *ndomain;
2793 struct intel_iommu *iommu;
2796 iommu = device_to_iommu(dev, &bus, &devfn);
2800 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2801 if (ndomain != domain)
2807 static bool device_has_rmrr(struct device *dev)
2809 struct dmar_rmrr_unit *rmrr;
2814 for_each_rmrr_units(rmrr) {
2816 * Return TRUE if this RMRR contains the device that
2819 for_each_active_dev_scope(rmrr->devices,
2820 rmrr->devices_cnt, i, tmp)
2831 * There are a couple cases where we need to restrict the functionality of
2832 * devices associated with RMRRs. The first is when evaluating a device for
2833 * identity mapping because problems exist when devices are moved in and out
2834 * of domains and their respective RMRR information is lost. This means that
2835 * a device with associated RMRRs will never be in a "passthrough" domain.
2836 * The second is use of the device through the IOMMU API. This interface
2837 * expects to have full control of the IOVA space for the device. We cannot
2838 * satisfy both the requirement that RMRR access is maintained and have an
2839 * unencumbered IOVA space. We also have no ability to quiesce the device's
2840 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2841 * We therefore prevent devices associated with an RMRR from participating in
2842 * the IOMMU API, which eliminates them from device assignment.
2844 * In both cases we assume that PCI USB devices with RMRRs have them largely
2845 * for historical reasons and that the RMRR space is not actively used post
2846 * boot. This exclusion may change if vendors begin to abuse it.
2848 * The same exception is made for graphics devices, with the requirement that
2849 * any use of the RMRR regions will be torn down before assigning the device
2852 static bool device_is_rmrr_locked(struct device *dev)
2854 if (!device_has_rmrr(dev))
2857 if (dev_is_pci(dev)) {
2858 struct pci_dev *pdev = to_pci_dev(dev);
2860 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
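/*
 * Decide whether @dev should live in the 1:1 identity domain.  RMRR-locked
 * devices never do; Azalia and graphics devices do when the corresponding
 * IDENTMAP flag is set; otherwise the decision depends on IDENTMAP_ALL,
 * on the device's position in the PCI topology and, at run time, on
 * whether its DMA mask covers all of memory.
 */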
2867 static int iommu_should_identity_map(struct device *dev, int startup)
2870 if (dev_is_pci(dev)) {
2871 struct pci_dev *pdev = to_pci_dev(dev);
2873 if (device_is_rmrr_locked(dev))
2876 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2879 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2882 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2886 * We want to start off with all devices in the 1:1 domain, and
2887 * take them out later if we find they can't access all of memory.
2889 * However, we can't do this for PCI devices behind bridges,
2890 * because all PCI devices behind the same bridge will end up
2891 * with the same source-id on their transactions.
2893 * Practically speaking, we can't change things around for these
2894 * devices at run-time, because we can't be sure there'll be no
2895 * DMA transactions in flight for any of their siblings.
2897 * So PCI devices (unless they're on the root bus) as well as
2898 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2899 * the 1:1 domain, just in _case_ one of their siblings turns out
2900 * not to be able to map all of memory.
2902 if (!pci_is_pcie(pdev)) {
2903 if (!pci_is_root_bus(pdev->bus))
2905 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2907 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2910 if (device_has_rmrr(dev))
2915 * At boot time, we don't yet know if devices will be 64-bit capable.
2916 * Assume that they will -- if they turn out not to be, then we can
2917 * take them out of the 1:1 domain later.
2921 * If the device's dma_mask is less than the system's memory
2922 * size then this is not a candidate for identity mapping.
2924 u64 dma_mask = *dev->dma_mask;
2926 if (dev->coherent_dma_mask &&
2927 dev->coherent_dma_mask < dma_mask)
2928 dma_mask = dev->coherent_dma_mask;
2930 return dma_mask >= dma_get_required_mask(dev);
2936 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2940 if (!iommu_should_identity_map(dev, 1))
2943 ret = domain_add_dev_info(si_domain, dev);
2945 pr_info("%s identity mapping for device %s\n",
2946 hw ? "Hardware" : "Software", dev_name(dev));
2947 else if (ret == -ENODEV)
2948 /* device not associated with an iommu */
2955 static int __init iommu_prepare_static_identity_mapping(int hw)
2957 struct pci_dev *pdev = NULL;
2958 struct dmar_drhd_unit *drhd;
2959 struct intel_iommu *iommu;
2964 for_each_pci_dev(pdev) {
2965 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2970 for_each_active_iommu(iommu, drhd)
2971 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2972 struct acpi_device_physical_node *pn;
2973 struct acpi_device *adev;
2975 if (dev->bus != &acpi_bus_type)
2978 adev = to_acpi_device(dev);
2979 mutex_lock(&adev->physical_node_lock);
2980 list_for_each_entry(pn, &adev->physical_node_list, node) {
2981 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2985 mutex_unlock(&adev->physical_node_lock);
2993 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2996 * Start from a sane iommu hardware state.
2997 * If queued invalidation was already initialized by us
2998 * (for example, while enabling interrupt remapping), then
2999 * things are already rolling from a sane state.
3003 * Clear any previous faults.
3005 dmar_fault(-1, iommu);
3007 * Disable queued invalidation if supported and already enabled
3008 * before OS handover.
3010 dmar_disable_qi(iommu);
3013 if (dmar_enable_qi(iommu)) {
3015 * Queued Invalidate not enabled, use Register Based Invalidate
3017 iommu->flush.flush_context = __iommu_flush_context;
3018 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3019 pr_info("%s: Using Register based invalidation\n",
3022 iommu->flush.flush_context = qi_flush_context;
3023 iommu->flush.flush_iotlb = qi_flush_iotlb;
3024 pr_info("%s: Using Queued invalidation\n", iommu->name);
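/*
 * kdump support: copy one bus's context table from the old kernel's root
 * entry into freshly allocated pages, preserving the domain IDs that were
 * in use and marking each copied entry so that faults on it can be told
 * apart from ordinary ones.
 */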
3028 static int copy_context_table(struct intel_iommu *iommu,
3029 struct root_entry *old_re,
3030 struct context_entry **tbl,
3033 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3034 struct context_entry *new_ce = NULL, ce;
3035 struct context_entry *old_ce = NULL;
3036 struct root_entry re;
3037 phys_addr_t old_ce_phys;
3039 tbl_idx = ext ? bus * 2 : bus;
3040 memcpy(&re, old_re, sizeof(re));
3042 for (devfn = 0; devfn < 256; devfn++) {
3043 /* First calculate the correct index */
3044 idx = (ext ? devfn * 2 : devfn) % 256;
3047 /* First save what we may have and clean up */
3049 tbl[tbl_idx] = new_ce;
3050 __iommu_flush_cache(iommu, new_ce,
3060 old_ce_phys = root_entry_lctp(&re);
3062 old_ce_phys = root_entry_uctp(&re);
3065 if (ext && devfn == 0) {
3066 /* No LCTP, try UCTP */
3075 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3080 new_ce = alloc_pgtable_page(iommu->node);
3087 /* Now copy the context entry */
3088 memcpy(&ce, old_ce + idx, sizeof(ce));
3090 if (!__context_present(&ce))
3093 did = context_domain_id(&ce);
3094 if (did >= 0 && did < cap_ndoms(iommu->cap))
3095 set_bit(did, iommu->domain_ids);
3098 * We need a marker for copied context entries. This
3099 * marker needs to work for the old format as well as
3100 * for extended context entries.
3102 * Bit 67 of the context entry is used. In the old
3103 * format this bit is available to software, in the
3104 * extended format it is the PGE bit, but PGE is ignored
3105 * by HW if PASIDs are disabled (and thus still
3108 * So disable PASIDs first and then mark the entry
3109 * copied. This means that we don't copy PASID
3110 * translations from the old kernel, but this is fine as
3111 * faults there are not fatal.
3113 context_clear_pasid_enable(&ce);
3114 context_set_copied(&ce);
3119 tbl[tbl_idx + pos] = new_ce;
3121 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
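/*
 * Copy the complete root/context table hierarchy that a previous (crashed)
 * kernel left behind, so DMA set up by that kernel keeps working while the
 * kdump kernel takes over.  Bails out if the extended-context (RTT) setting
 * would have to change, since that can only be done with translation
 * disabled.
 */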
3130 static int copy_translation_tables(struct intel_iommu *iommu)
3132 struct context_entry **ctxt_tbls;
3133 struct root_entry *old_rt;
3134 phys_addr_t old_rt_phys;
3135 int ctxt_table_entries;
3136 unsigned long flags;
3141 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3142 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3143 new_ext = !!ecap_ecs(iommu->ecap);
3146 * The RTT bit can only be changed when translation is disabled,
3147 * but disabling translation means to open a window for data
3148 * corruption. So bail out and don't copy anything if we would
3149 * have to change the bit.
3154 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3158 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3162 /* This is too big for the stack - allocate it from slab */
3163 ctxt_table_entries = ext ? 512 : 256;
3165 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3169 for (bus = 0; bus < 256; bus++) {
3170 ret = copy_context_table(iommu, &old_rt[bus],
3171 ctxt_tbls, bus, ext);
3173 pr_err("%s: Failed to copy context table for bus %d\n",
3179 spin_lock_irqsave(&iommu->lock, flags);
3181 /* Context tables are copied, now write them to the root_entry table */
3182 for (bus = 0; bus < 256; bus++) {
3183 int idx = ext ? bus * 2 : bus;
3186 if (ctxt_tbls[idx]) {
3187 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3188 iommu->root_entry[bus].lo = val;
3191 if (!ext || !ctxt_tbls[idx + 1])
3194 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3195 iommu->root_entry[bus].hi = val;
3198 spin_unlock_irqrestore(&iommu->lock, flags);
3202 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
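/*
 * One-time boot initialization: allocate the global iommu array and the
 * per-cpu deferred-flush data, set up root entries (or copy them from a
 * previous kernel), create the identity mappings for RMRRs and ISA, and
 * finally enable translation on every active IOMMU.
 */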
3212 static int __init init_dmars(void)
3214 struct dmar_drhd_unit *drhd;
3215 struct dmar_rmrr_unit *rmrr;
3216 bool copied_tables = false;
3218 struct intel_iommu *iommu;
3224 * initialize and program root entry to not present
3227 for_each_drhd_unit(drhd) {
3229 * lock not needed as this is only incremented in the single-
3230 * threaded kernel __init code path; all other accesses are read
3233 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3237 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3240 /* Preallocate enough resources for IOMMU hot-addition */
3241 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3242 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3244 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3247 pr_err("Allocating global iommu array failed\n");
3252 for_each_possible_cpu(cpu) {
3253 struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
3256 dfd->tables = kzalloc(g_num_of_iommus *
3257 sizeof(struct deferred_flush_table),
3264 spin_lock_init(&dfd->lock);
3265 setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
3268 for_each_active_iommu(iommu, drhd) {
3269 g_iommus[iommu->seq_id] = iommu;
3271 intel_iommu_init_qi(iommu);
3273 ret = iommu_init_domains(iommu);
3277 init_translation_status(iommu);
3279 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3280 iommu_disable_translation(iommu);
3281 clear_translation_pre_enabled(iommu);
3282 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3288 * we could share the same root & context tables
3289 * among all IOMMUs. Need to split it later.
3291 ret = iommu_alloc_root_entry(iommu);
3295 if (translation_pre_enabled(iommu)) {
3296 pr_info("Translation already enabled - trying to copy translation structures\n");
3298 ret = copy_translation_tables(iommu);
3301 * We found the IOMMU with translation
3302 * enabled - but failed to copy over the
3303 * old root-entry table. Try to proceed
3304 * by disabling translation now and
3305 * allocating a clean root-entry table.
3306 * This might cause DMAR faults, but
3307 * probably the dump will still succeed.
3309 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3311 iommu_disable_translation(iommu);
3312 clear_translation_pre_enabled(iommu);
3314 pr_info("Copied translation tables from previous kernel for %s\n",
3316 copied_tables = true;
3320 if (!ecap_pass_through(iommu->ecap))
3321 hw_pass_through = 0;
3322 #ifdef CONFIG_INTEL_IOMMU_SVM
3323 if (pasid_enabled(iommu))
3324 intel_svm_alloc_pasid_tables(iommu);
3329 * Now that qi is enabled on all iommus, set the root entry and flush
3330 * caches. This is required on some Intel X58 chipsets, otherwise the
3331 * flush_context function will loop forever and the boot hangs.
3333 for_each_active_iommu(iommu, drhd) {
3334 iommu_flush_write_buffer(iommu);
3335 iommu_set_root_entry(iommu);
3336 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3337 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3340 if (iommu_pass_through)
3341 iommu_identity_mapping |= IDENTMAP_ALL;
3343 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3344 iommu_identity_mapping |= IDENTMAP_GFX;
3347 check_tylersburg_isoch();
3349 if (iommu_identity_mapping) {
3350 ret = si_domain_init(hw_pass_through);
3357 * If we copied translations from a previous kernel in the kdump
3358 * case, we can not assign the devices to domains now, as that
3359 * would eliminate the old mappings. So skip this part and defer
3360 * the assignment to device driver initialization time.
3366 * If pass-through is not set or not enabled, set up context entries for
3367 * identity mappings for rmrr, gfx, and isa, and possibly fall back to static
3368 * identity mapping if iommu_identity_mapping is set.
3370 if (iommu_identity_mapping) {
3371 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3373 pr_crit("Failed to setup IOMMU pass-through\n");
3379 * for each dev attached to rmrr
3381 * locate drhd for dev, alloc domain for dev
3382 * allocate free domain
3383 * allocate page table entries for rmrr
3384 * if context not allocated for bus
3385 * allocate and init context
3386 * set present in root table for this bus
3387 * init context with domain, translation etc
3391 pr_info("Setting RMRR:\n");
3392 for_each_rmrr_units(rmrr) {
3393 /* some BIOSes list non-existent devices in the DMAR table. */
3394 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3396 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3398 pr_err("Mapping reserved region failed\n");
3402 iommu_prepare_isa();
3409 * global invalidate context cache
3410 * global invalidate iotlb
3411 * enable translation
3413 for_each_iommu(iommu, drhd) {
3414 if (drhd->ignored) {
3416 * we always have to disable PMRs or DMA may fail on
3420 iommu_disable_protect_mem_regions(iommu);
3424 iommu_flush_write_buffer(iommu);
3426 #ifdef CONFIG_INTEL_IOMMU_SVM
3427 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3428 ret = intel_svm_enable_prq(iommu);
3433 ret = dmar_set_interrupt(iommu);
3437 if (!translation_pre_enabled(iommu))
3438 iommu_enable_translation(iommu);
3440 iommu_disable_protect_mem_regions(iommu);
3446 for_each_active_iommu(iommu, drhd) {
3447 disable_dmar_iommu(iommu);
3448 free_dmar_iommu(iommu);
3451 for_each_possible_cpu(cpu)
3452 kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
3458 /* This takes a number of _MM_ pages, not VTD pages */
3459 static unsigned long intel_alloc_iova(struct device *dev,
3460 struct dmar_domain *domain,
3461 unsigned long nrpages, uint64_t dma_mask)
3463 unsigned long iova_pfn = 0;
3465 /* Restrict dma_mask to the width that the iommu can handle */
3466 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3467 /* Ensure we reserve the whole size-aligned region */
3468 nrpages = __roundup_pow_of_two(nrpages);
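/* e.g. a 5-page request reserves 8 pages so the region stays size-aligned */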
3470 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3472 * First try to allocate an io virtual address in
3473 * DMA_BIT_MASK(32) and if that fails then try allocating
3476 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3477 IOVA_PFN(DMA_BIT_MASK(32)));
3481 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
3482 if (unlikely(!iova_pfn)) {
3483 pr_err("Allocating %ld-page iova for %s failed",
3484 nrpages, dev_name(dev));
3491 static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3493 struct dmar_domain *domain, *tmp;
3494 struct dmar_rmrr_unit *rmrr;
3495 struct device *i_dev;
3498 domain = find_domain(dev);
3502 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3506 /* We have a new domain - setup possible RMRRs for the device */
3508 for_each_rmrr_units(rmrr) {
3509 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3514 ret = domain_prepare_identity_map(dev, domain,
3518 dev_err(dev, "Mapping reserved region failed\n");
3523 tmp = set_domain_for_dev(dev, domain);
3524 if (!tmp || domain != tmp) {
3525 domain_exit(domain);
3532 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3538 /* Check if the dev needs to go through non-identity map and unmap process.*/
3539 static int iommu_no_mapping(struct device *dev)
3543 if (iommu_dummy(dev))
3546 if (!iommu_identity_mapping)
3549 found = identity_mapping(dev);
3551 if (iommu_should_identity_map(dev, 0))
3555 * 32 bit DMA is removed from si_domain and fall back
3556 * to non-identity mapping.
3558 dmar_remove_one_dev_info(si_domain, dev);
3559 pr_info("32bit %s uses non-identity mapping\n",
3565 * In case of a detached 64 bit DMA device from vm, the device
3566 * is put into si_domain for identity mapping.
3568 if (iommu_should_identity_map(dev, 0)) {
3570 ret = domain_add_dev_info(si_domain, dev);
3572 pr_info("64bit %s uses identity mapping\n",
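/*
 * Map one physically contiguous buffer: allocate an IOVA range, install
 * the PTEs with the permissions implied by @dir, then flush the IOTLB or
 * the write buffer as caching mode requires.  Returns the resulting bus
 * address, or 0 on failure.
 */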
3582 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3583 size_t size, int dir, u64 dma_mask)
3585 struct dmar_domain *domain;
3586 phys_addr_t start_paddr;
3587 unsigned long iova_pfn;
3590 struct intel_iommu *iommu;
3591 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3593 BUG_ON(dir == DMA_NONE);
3595 if (iommu_no_mapping(dev))
3598 domain = get_valid_domain_for_dev(dev);
3602 iommu = domain_get_iommu(domain);
3603 size = aligned_nrpages(paddr, size);
3605 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3610 * Check if DMAR supports zero-length reads on write only
3613 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3614 !cap_zlr(iommu->cap))
3615 prot |= DMA_PTE_READ;
3616 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3617 prot |= DMA_PTE_WRITE;
3619 * paddr to (paddr + size) might span partial pages; we should map the whole
3620 * page. Note: if two parts of one page are mapped separately, we
3621 * might have two guest addresses mapping to the same host paddr, but this
3622 * is not a big problem
3624 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3625 mm_to_dma_pfn(paddr_pfn), size, prot);
3629 /* it's a non-present to present mapping. Only flush if caching mode */
3630 if (cap_caching_mode(iommu->cap))
3631 iommu_flush_iotlb_psi(iommu, domain,
3632 mm_to_dma_pfn(iova_pfn),
3635 iommu_flush_write_buffer(iommu);
3637 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3638 start_paddr += paddr & ~PAGE_MASK;
3643 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3644 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3645 dev_name(dev), size, (unsigned long long)paddr, dir);
3649 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3650 unsigned long offset, size_t size,
3651 enum dma_data_direction dir,
3652 unsigned long attrs)
3654 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3655 dir, *dev->dma_mask);
3658 static void flush_unmaps(struct deferred_flush_data *flush_data)
3662 flush_data->timer_on = 0;
3664 /* just flush them all */
3665 for (i = 0; i < g_num_of_iommus; i++) {
3666 struct intel_iommu *iommu = g_iommus[i];
3667 struct deferred_flush_table *flush_table =
3668 &flush_data->tables[i];
3672 if (!flush_table->next)
3675 /* In caching mode, global flushes make emulation expensive */
3676 if (!cap_caching_mode(iommu->cap))
3677 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3678 DMA_TLB_GLOBAL_FLUSH);
3679 for (j = 0; j < flush_table->next; j++) {
3681 struct deferred_flush_entry *entry =
3682 &flush_table->entries[j];
3683 unsigned long iova_pfn = entry->iova_pfn;
3684 unsigned long nrpages = entry->nrpages;
3685 struct dmar_domain *domain = entry->domain;
3686 struct page *freelist = entry->freelist;
3688 /* On real hardware multiple invalidations are expensive */
3689 if (cap_caching_mode(iommu->cap))
3690 iommu_flush_iotlb_psi(iommu, domain,
3691 mm_to_dma_pfn(iova_pfn),
3692 nrpages, !freelist, 0);
3694 mask = ilog2(nrpages);
3695 iommu_flush_dev_iotlb(domain,
3696 (uint64_t)iova_pfn << PAGE_SHIFT, mask);
3698 free_iova_fast(&domain->iovad, iova_pfn, nrpages);
3700 dma_free_pagelist(freelist);
3702 flush_table->next = 0;
3705 flush_data->size = 0;
3708 static void flush_unmaps_timeout(unsigned long cpuid)
3710 struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3711 unsigned long flags;
3713 spin_lock_irqsave(&flush_data->lock, flags);
3714 flush_unmaps(flush_data);
3715 spin_unlock_irqrestore(&flush_data->lock, flags);
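/*
 * Queue an unmapped IOVA range on this CPU's deferred-flush table instead
 * of flushing immediately; the batch is drained either by the 10ms timer
 * or synchronously once HIGH_WATER_MARK entries have accumulated.
 */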
3718 static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
3719 unsigned long nrpages, struct page *freelist)
3721 unsigned long flags;
3722 int entry_id, iommu_id;
3723 struct intel_iommu *iommu;
3724 struct deferred_flush_entry *entry;
3725 struct deferred_flush_data *flush_data;
3727 flush_data = raw_cpu_ptr(&deferred_flush);
3729 /* Flush all CPUs' entries to avoid deferring too much. If
3730 * this becomes a bottleneck, we can flush just this CPU and rely on
3731 * the flush timer for the rest.
3733 if (flush_data->size == HIGH_WATER_MARK) {
3736 for_each_online_cpu(cpu)
3737 flush_unmaps_timeout(cpu);
3740 spin_lock_irqsave(&flush_data->lock, flags);
3742 iommu = domain_get_iommu(dom);
3743 iommu_id = iommu->seq_id;
3745 entry_id = flush_data->tables[iommu_id].next;
3746 ++(flush_data->tables[iommu_id].next);
3748 entry = &flush_data->tables[iommu_id].entries[entry_id];
3749 entry->domain = dom;
3750 entry->iova_pfn = iova_pfn;
3751 entry->nrpages = nrpages;
3752 entry->freelist = freelist;
3754 if (!flush_data->timer_on) {
3755 mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
3756 flush_data->timer_on = 1;
3759 spin_unlock_irqrestore(&flush_data->lock, flags);
3762 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3764 struct dmar_domain *domain;
3765 unsigned long start_pfn, last_pfn;
3766 unsigned long nrpages;
3767 unsigned long iova_pfn;
3768 struct intel_iommu *iommu;
3769 struct page *freelist;
3771 if (iommu_no_mapping(dev))
3774 domain = find_domain(dev);
3777 iommu = domain_get_iommu(domain);
3779 iova_pfn = IOVA_PFN(dev_addr);
3781 nrpages = aligned_nrpages(dev_addr, size);
3782 start_pfn = mm_to_dma_pfn(iova_pfn);
3783 last_pfn = start_pfn + nrpages - 1;
3785 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3786 dev_name(dev), start_pfn, last_pfn);
3788 freelist = domain_unmap(domain, start_pfn, last_pfn);
3790 if (intel_iommu_strict) {
3791 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3792 nrpages, !freelist, 0);
3794 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3795 dma_free_pagelist(freelist);
3797 add_unmap(domain, iova_pfn, nrpages, freelist);
3799 * queue up the release of the unmap to save the roughly 1/6th of the
3800 * cpu time used up by the iotlb flush operation...
3805 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3806 size_t size, enum dma_data_direction dir,
3807 unsigned long attrs)
3809 intel_unmap(dev, dev_addr, size);
3812 static void *intel_alloc_coherent(struct device *dev, size_t size,
3813 dma_addr_t *dma_handle, gfp_t flags,
3814 unsigned long attrs)
3816 struct page *page = NULL;
3819 size = PAGE_ALIGN(size);
3820 order = get_order(size);
3822 if (!iommu_no_mapping(dev))
3823 flags &= ~(GFP_DMA | GFP_DMA32);
3824 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3825 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3831 if (gfpflags_allow_blocking(flags)) {
3832 unsigned int count = size >> PAGE_SHIFT;
3834 page = dma_alloc_from_contiguous(dev, count, order, flags);
3835 if (page && iommu_no_mapping(dev) &&
3836 page_to_phys(page) + size > dev->coherent_dma_mask) {
3837 dma_release_from_contiguous(dev, page, count);
3843 page = alloc_pages(flags, order);
3846 memset(page_address(page), 0, size);
3848 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3850 dev->coherent_dma_mask);
3852 return page_address(page);
3853 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3854 __free_pages(page, order);
3859 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3860 dma_addr_t dma_handle, unsigned long attrs)
3863 struct page *page = virt_to_page(vaddr);
3865 size = PAGE_ALIGN(size);
3866 order = get_order(size);
3868 intel_unmap(dev, dma_handle, size);
3869 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3870 __free_pages(page, order);
3873 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3874 int nelems, enum dma_data_direction dir,
3875 unsigned long attrs)
3877 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3878 unsigned long nrpages = 0;
3879 struct scatterlist *sg;
3882 for_each_sg(sglist, sg, nelems, i) {
3883 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3886 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3889 static int intel_nontranslate_map_sg(struct device *hddev,
3890 struct scatterlist *sglist, int nelems, int dir)
3893 struct scatterlist *sg;
3895 for_each_sg(sglist, sg, nelems, i) {
3896 BUG_ON(!sg_page(sg));
3897 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3898 sg->dma_length = sg->length;
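/*
 * Map a scatterlist: one IOVA allocation sized for the whole list, then a
 * single __domain_mapping() call walks the entries and fills in their
 * dma_address/dma_length.
 */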
3903 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3904 enum dma_data_direction dir, unsigned long attrs)
3907 struct dmar_domain *domain;
3910 unsigned long iova_pfn;
3912 struct scatterlist *sg;
3913 unsigned long start_vpfn;
3914 struct intel_iommu *iommu;
3916 BUG_ON(dir == DMA_NONE);
3917 if (iommu_no_mapping(dev))
3918 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3920 domain = get_valid_domain_for_dev(dev);
3924 iommu = domain_get_iommu(domain);
3926 for_each_sg(sglist, sg, nelems, i)
3927 size += aligned_nrpages(sg->offset, sg->length);
3929 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3932 sglist->dma_length = 0;
3937 * Check if DMAR supports zero-length reads on write only
3940 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3941 !cap_zlr(iommu->cap))
3942 prot |= DMA_PTE_READ;
3943 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3944 prot |= DMA_PTE_WRITE;
3946 start_vpfn = mm_to_dma_pfn(iova_pfn);
3948 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3949 if (unlikely(ret)) {
3950 dma_pte_free_pagetable(domain, start_vpfn,
3951 start_vpfn + size - 1,
3952 agaw_to_level(domain->agaw) + 1);
3953 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3957 /* it's a non-present to present mapping. Only flush if caching mode */
3958 if (cap_caching_mode(iommu->cap))
3959 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3961 iommu_flush_write_buffer(iommu);
3966 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3971 const struct dma_map_ops intel_dma_ops = {
3972 .alloc = intel_alloc_coherent,
3973 .free = intel_free_coherent,
3974 .map_sg = intel_map_sg,
3975 .unmap_sg = intel_unmap_sg,
3976 .map_page = intel_map_page,
3977 .unmap_page = intel_unmap_page,
3978 .mapping_error = intel_mapping_error,
3981 static inline int iommu_domain_cache_init(void)
3985 iommu_domain_cache = kmem_cache_create("iommu_domain",
3986 sizeof(struct dmar_domain),
3991 if (!iommu_domain_cache) {
3992 pr_err("Couldn't create iommu_domain cache\n");
3999 static inline int iommu_devinfo_cache_init(void)
4003 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
4004 sizeof(struct device_domain_info),
4008 if (!iommu_devinfo_cache) {
4009 pr_err("Couldn't create devinfo cache\n");
4016 static int __init iommu_init_mempool(void)
4019 ret = iova_cache_get();
4023 ret = iommu_domain_cache_init();
4027 ret = iommu_devinfo_cache_init();
4031 kmem_cache_destroy(iommu_domain_cache);
4038 static void __init iommu_exit_mempool(void)
4040 kmem_cache_destroy(iommu_devinfo_cache);
4041 kmem_cache_destroy(iommu_domain_cache);
4045 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
4047 struct dmar_drhd_unit *drhd;
4051 /* We know that this device on this chipset has its own IOMMU.
4052 * If we find it under a different IOMMU, then the BIOS is lying
4053 * to us. Hope that the IOMMU for this device is actually
4054 * disabled, and it needs no translation...
4056 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4058 /* "can't" happen */
4059 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4062 vtbar &= 0xffff0000;
4064 /* we know that this iommu should be at offset 0xa000 from vtbar */
4065 drhd = dmar_find_matched_drhd_unit(pdev);
4066 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4067 TAINT_FIRMWARE_WORKAROUND,
4068 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4069 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4071 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4073 static void __init init_no_remapping_devices(void)
4075 struct dmar_drhd_unit *drhd;
4079 for_each_drhd_unit(drhd) {
4080 if (!drhd->include_all) {
4081 for_each_active_dev_scope(drhd->devices,
4082 drhd->devices_cnt, i, dev)
4084 /* ignore DMAR unit if no devices exist */
4085 if (i == drhd->devices_cnt)
4090 for_each_active_drhd_unit(drhd) {
4091 if (drhd->include_all)
4094 for_each_active_dev_scope(drhd->devices,
4095 drhd->devices_cnt, i, dev)
4096 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
4098 if (i < drhd->devices_cnt)
4101 /* This IOMMU has *only* gfx devices. Either bypass it or
4102 set the gfx_mapped flag, as appropriate */
4104 intel_iommu_gfx_mapped = 1;
4107 for_each_active_dev_scope(drhd->devices,
4108 drhd->devices_cnt, i, dev)
4109 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4114 #ifdef CONFIG_SUSPEND
4115 static int init_iommu_hw(void)
4117 struct dmar_drhd_unit *drhd;
4118 struct intel_iommu *iommu = NULL;
4120 for_each_active_iommu(iommu, drhd)
4122 dmar_reenable_qi(iommu);
4124 for_each_iommu(iommu, drhd) {
4125 if (drhd->ignored) {
4127 * we always have to disable PMRs or DMA may fail on
4131 iommu_disable_protect_mem_regions(iommu);
4135 iommu_flush_write_buffer(iommu);
4137 iommu_set_root_entry(iommu);
4139 iommu->flush.flush_context(iommu, 0, 0, 0,
4140 DMA_CCMD_GLOBAL_INVL);
4141 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4142 iommu_enable_translation(iommu);
4143 iommu_disable_protect_mem_regions(iommu);
4149 static void iommu_flush_all(void)
4151 struct dmar_drhd_unit *drhd;
4152 struct intel_iommu *iommu;
4154 for_each_active_iommu(iommu, drhd) {
4155 iommu->flush.flush_context(iommu, 0, 0, 0,
4156 DMA_CCMD_GLOBAL_INVL);
4157 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4158 DMA_TLB_GLOBAL_FLUSH);
4162 static int iommu_suspend(void)
4164 struct dmar_drhd_unit *drhd;
4165 struct intel_iommu *iommu = NULL;
4168 for_each_active_iommu(iommu, drhd) {
4169 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4171 if (!iommu->iommu_state)
4177 for_each_active_iommu(iommu, drhd) {
4178 iommu_disable_translation(iommu);
4180 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4182 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4183 readl(iommu->reg + DMAR_FECTL_REG);
4184 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4185 readl(iommu->reg + DMAR_FEDATA_REG);
4186 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4187 readl(iommu->reg + DMAR_FEADDR_REG);
4188 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4189 readl(iommu->reg + DMAR_FEUADDR_REG);
4191 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4196 for_each_active_iommu(iommu, drhd)
4197 kfree(iommu->iommu_state);
4202 static void iommu_resume(void)
4204 struct dmar_drhd_unit *drhd;
4205 struct intel_iommu *iommu = NULL;
4208 if (init_iommu_hw()) {
4210 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4212 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4216 for_each_active_iommu(iommu, drhd) {
4218 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4220 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4221 iommu->reg + DMAR_FECTL_REG);
4222 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4223 iommu->reg + DMAR_FEDATA_REG);
4224 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4225 iommu->reg + DMAR_FEADDR_REG);
4226 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4227 iommu->reg + DMAR_FEUADDR_REG);
4229 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4232 for_each_active_iommu(iommu, drhd)
4233 kfree(iommu->iommu_state);
4236 static struct syscore_ops iommu_syscore_ops = {
4237 .resume = iommu_resume,
4238 .suspend = iommu_suspend,
4241 static void __init init_iommu_pm_ops(void)
4243 register_syscore_ops(&iommu_syscore_ops);
4247 static inline void init_iommu_pm_ops(void) {}
4248 #endif /* CONFIG_PM */
4251 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4253 struct acpi_dmar_reserved_memory *rmrr;
4254 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
4255 struct dmar_rmrr_unit *rmrru;
4258 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4262 rmrru->hdr = header;
4263 rmrr = (struct acpi_dmar_reserved_memory *)header;
4264 rmrru->base_address = rmrr->base_address;
4265 rmrru->end_address = rmrr->end_address;
4267 length = rmrr->end_address - rmrr->base_address + 1;
4268 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4273 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4274 ((void *)rmrr) + rmrr->header.length,
4275 &rmrru->devices_cnt);
4276 if (rmrru->devices_cnt && rmrru->devices == NULL)
4279 list_add(&rmrru->list, &dmar_rmrr_units);
4290 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4292 struct dmar_atsr_unit *atsru;
4293 struct acpi_dmar_atsr *tmp;
4295 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4296 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4297 if (atsr->segment != tmp->segment)
4299 if (atsr->header.length != tmp->header.length)
4301 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4308 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4310 struct acpi_dmar_atsr *atsr;
4311 struct dmar_atsr_unit *atsru;
4313 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4316 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4317 atsru = dmar_find_atsr(atsr);
4321 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4326 * If memory is allocated from slab by ACPI _DSM method, we need to
4327 * copy the memory content because the memory buffer will be freed
4330 atsru->hdr = (void *)(atsru + 1);
4331 memcpy(atsru->hdr, hdr, hdr->length);
4332 atsru->include_all = atsr->flags & 0x1;
4333 if (!atsru->include_all) {
4334 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4335 (void *)atsr + atsr->header.length,
4336 &atsru->devices_cnt);
4337 if (atsru->devices_cnt && atsru->devices == NULL) {
4343 list_add_rcu(&atsru->list, &dmar_atsr_units);
4348 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4350 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4354 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4356 struct acpi_dmar_atsr *atsr;
4357 struct dmar_atsr_unit *atsru;
4359 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4360 atsru = dmar_find_atsr(atsr);
4362 list_del_rcu(&atsru->list);
4364 intel_iommu_free_atsr(atsru);
4370 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4374 struct acpi_dmar_atsr *atsr;
4375 struct dmar_atsr_unit *atsru;
4377 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4378 atsru = dmar_find_atsr(atsr);
4382 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4383 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4391 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4394 struct intel_iommu *iommu = dmaru->iommu;
4396 if (g_iommus[iommu->seq_id])
4399 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4400 pr_warn("%s: Doesn't support hardware pass through.\n",
4404 if (!ecap_sc_support(iommu->ecap) &&
4405 domain_update_iommu_snooping(iommu)) {
4406 pr_warn("%s: Doesn't support snooping.\n",
4410 sp = domain_update_iommu_superpage(iommu) - 1;
4411 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4412 pr_warn("%s: Doesn't support large page.\n",
4418 * Disable translation if already enabled prior to OS handover.
4420 if (iommu->gcmd & DMA_GCMD_TE)
4421 iommu_disable_translation(iommu);
4423 g_iommus[iommu->seq_id] = iommu;
4424 ret = iommu_init_domains(iommu);
4426 ret = iommu_alloc_root_entry(iommu);
4430 #ifdef CONFIG_INTEL_IOMMU_SVM
4431 if (pasid_enabled(iommu))
4432 intel_svm_alloc_pasid_tables(iommu);
4435 if (dmaru->ignored) {
4437 * we always have to disable PMRs or DMA may fail on this device
4440 iommu_disable_protect_mem_regions(iommu);
4444 intel_iommu_init_qi(iommu);
4445 iommu_flush_write_buffer(iommu);
4447 #ifdef CONFIG_INTEL_IOMMU_SVM
4448 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4449 ret = intel_svm_enable_prq(iommu);
4454 ret = dmar_set_interrupt(iommu);
4458 iommu_set_root_entry(iommu);
4459 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4460 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4461 iommu_enable_translation(iommu);
4463 iommu_disable_protect_mem_regions(iommu);
4467 disable_dmar_iommu(iommu);
4469 free_dmar_iommu(iommu);
4473 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4476 struct intel_iommu *iommu = dmaru->iommu;
4478 if (!intel_iommu_enabled)
4484 ret = intel_iommu_add(dmaru);
4486 disable_dmar_iommu(iommu);
4487 free_dmar_iommu(iommu);
4493 static void intel_iommu_free_dmars(void)
4495 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4496 struct dmar_atsr_unit *atsru, *atsr_n;
4498 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4499 list_del(&rmrru->list);
4500 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4505 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4506 list_del(&atsru->list);
4507 intel_iommu_free_atsr(atsru);
4511 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4514 struct pci_bus *bus;
4515 struct pci_dev *bridge = NULL;
4517 struct acpi_dmar_atsr *atsr;
4518 struct dmar_atsr_unit *atsru;
4520 dev = pci_physfn(dev);
4521 for (bus = dev->bus; bus; bus = bus->parent) {
4523 /* If it's an integrated device, allow ATS */
4526 /* Connected via non-PCIe: no ATS */
4527 if (!pci_is_pcie(bridge) ||
4528 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4530 /* If we found the root port, look it up in the ATSR */
4531 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4536 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4537 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4538 if (atsr->segment != pci_domain_nr(dev->bus))
4541 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4542 if (tmp == &bridge->dev)
4545 if (atsru->include_all)
4555 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4558 struct dmar_rmrr_unit *rmrru;
4559 struct dmar_atsr_unit *atsru;
4560 struct acpi_dmar_atsr *atsr;
4561 struct acpi_dmar_reserved_memory *rmrr;
4563 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4566 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4567 rmrr = container_of(rmrru->hdr,
4568 struct acpi_dmar_reserved_memory, header);
4569 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4570 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4571 ((void *)rmrr) + rmrr->header.length,
4572 rmrr->segment, rmrru->devices,
4573 rmrru->devices_cnt);
4576 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4577 dmar_remove_dev_scope(info, rmrr->segment,
4578 rmrru->devices, rmrru->devices_cnt);
4582 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4583 if (atsru->include_all)
4586 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4587 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4588 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4589 (void *)atsr + atsr->header.length,
4590 atsr->segment, atsru->devices,
4591 atsru->devices_cnt);
4596 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4597 if (dmar_remove_dev_scope(info, atsr->segment,
4598 atsru->devices, atsru->devices_cnt))
4607 * Here we only respond to the action of unbinding a device from its driver.
4609 * A newly added device is not attached to its DMAR domain here yet; that happens
4610 * when the device is mapped to an iova.
4612 static int device_notifier(struct notifier_block *nb,
4613 unsigned long action, void *data)
4615 struct device *dev = data;
4616 struct dmar_domain *domain;
4618 if (iommu_dummy(dev))
4621 if (action != BUS_NOTIFY_REMOVED_DEVICE)
4624 domain = find_domain(dev);
4628 dmar_remove_one_dev_info(domain, dev);
4629 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4630 domain_exit(domain);
4635 static struct notifier_block device_nb = {
4636 .notifier_call = device_notifier,
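/*
 * Memory hotplug notifier: extend the static identity map when memory goes
 * online, and tear the mappings and their IOVAs back down when the range
 * goes away again.
 */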
4639 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4640 unsigned long val, void *v)
4642 struct memory_notify *mhp = v;
4643 unsigned long long start, end;
4644 unsigned long start_vpfn, last_vpfn;
4647 case MEM_GOING_ONLINE:
4648 start = mhp->start_pfn << PAGE_SHIFT;
4649 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4650 if (iommu_domain_identity_map(si_domain, start, end)) {
4651 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4658 case MEM_CANCEL_ONLINE:
4659 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4660 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4661 while (start_vpfn <= last_vpfn) {
4663 struct dmar_drhd_unit *drhd;
4664 struct intel_iommu *iommu;
4665 struct page *freelist;
4667 iova = find_iova(&si_domain->iovad, start_vpfn);
4669 pr_debug("Failed get IOVA for PFN %lx\n",
4674 iova = split_and_remove_iova(&si_domain->iovad, iova,
4675 start_vpfn, last_vpfn);
4677 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4678 start_vpfn, last_vpfn);
4682 freelist = domain_unmap(si_domain, iova->pfn_lo,
4686 for_each_active_iommu(iommu, drhd)
4687 iommu_flush_iotlb_psi(iommu, si_domain,
4688 iova->pfn_lo, iova_size(iova),
4691 dma_free_pagelist(freelist);
4693 start_vpfn = iova->pfn_hi + 1;
4694 free_iova_mem(iova);
4702 static struct notifier_block intel_iommu_memory_nb = {
4703 .notifier_call = intel_iommu_memory_notifier,
4707 static void free_all_cpu_cached_iovas(unsigned int cpu)
4711 for (i = 0; i < g_num_of_iommus; i++) {
4712 struct intel_iommu *iommu = g_iommus[i];
4713 struct dmar_domain *domain;
4719 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4720 domain = get_iommu_domain(iommu, (u16)did);
4724 free_cpu_cached_iovas(cpu, &domain->iovad);
4729 static int intel_iommu_cpu_dead(unsigned int cpu)
4731 free_all_cpu_cached_iovas(cpu);
4732 flush_unmaps_timeout(cpu);
4736 static void intel_disable_iommus(void)
4738 struct intel_iommu *iommu = NULL;
4739 struct dmar_drhd_unit *drhd;
4741 for_each_iommu(iommu, drhd)
4742 iommu_disable_translation(iommu);
4745 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4747 return container_of(dev, struct intel_iommu, iommu.dev);
4750 static ssize_t intel_iommu_show_version(struct device *dev,
4751 struct device_attribute *attr,
4754 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4755 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4756 return sprintf(buf, "%d:%d\n",
4757 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4759 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4761 static ssize_t intel_iommu_show_address(struct device *dev,
4762 struct device_attribute *attr,
4765 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4766 return sprintf(buf, "%llx\n", iommu->reg_phys);
4768 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4770 static ssize_t intel_iommu_show_cap(struct device *dev,
4771 struct device_attribute *attr,
4774 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4775 return sprintf(buf, "%llx\n", iommu->cap);
4777 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4779 static ssize_t intel_iommu_show_ecap(struct device *dev,
4780 struct device_attribute *attr,
4783 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4784 return sprintf(buf, "%llx\n", iommu->ecap);
4786 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4788 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4789 struct device_attribute *attr,
4792 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4793 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4795 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4797 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4798 struct device_attribute *attr,
4801 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4802 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4803 cap_ndoms(iommu->cap)));
4805 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4807 static struct attribute *intel_iommu_attrs[] = {
4808 &dev_attr_version.attr,
4809 &dev_attr_address.attr,
4811 &dev_attr_ecap.attr,
4812 &dev_attr_domains_supported.attr,
4813 &dev_attr_domains_used.attr,
4817 static struct attribute_group intel_iommu_group = {
4818 .name = "intel-iommu",
4819 .attrs = intel_iommu_attrs,
4822 const struct attribute_group *intel_iommu_groups[] = {
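/*
 * Main initialization entry point: parse the DMAR table, initialize every
 * IOMMU via init_dmars(), install intel_dma_ops as the DMA API backend and
 * register the IOMMU sysfs devices and bus/memory/cpuhp notifiers.
 */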
4827 int __init intel_iommu_init(void)
4830 struct dmar_drhd_unit *drhd;
4831 struct intel_iommu *iommu;
4833 /* VT-d is required for a TXT/tboot launch, so enforce that */
4834 force_on = tboot_force_iommu();
4836 if (iommu_init_mempool()) {
4838 panic("tboot: Failed to initialize iommu memory\n");
4842 down_write(&dmar_global_lock);
4843 if (dmar_table_init()) {
4845 panic("tboot: Failed to initialize DMAR table\n");
4849 if (dmar_dev_scope_init() < 0) {
4851 panic("tboot: Failed to initialize DMAR device scope\n");
4855 if (no_iommu || dmar_disabled) {
4857 * We exit the function here to ensure that the IOMMU's remapping and
4858 * mempool aren't set up, which means that the IOMMU's PMRs
4859 * won't be disabled via the call to init_dmars(). So disable
4860 * them explicitly here. The PMRs were set up by tboot prior to
4861 * calling SENTER, but the kernel is expected to reset/tear
4864 if (intel_iommu_tboot_noforce) {
4865 for_each_iommu(iommu, drhd)
4866 iommu_disable_protect_mem_regions(iommu);
4870 * Make sure the IOMMUs are switched off, even when we
4871 * boot into a kexec kernel and the previous kernel left
4874 intel_disable_iommus();
4878 if (list_empty(&dmar_rmrr_units))
4879 pr_info("No RMRR found\n");
4881 if (list_empty(&dmar_atsr_units))
4882 pr_info("No ATSR found\n");
4884 if (dmar_init_reserved_ranges()) {
4886 panic("tboot: Failed to reserve iommu ranges\n");
4887 goto out_free_reserved_range;
4890 init_no_remapping_devices();
4895 panic("tboot: Failed to initialize DMARs\n");
4896 pr_err("Initialization failed\n");
4897 goto out_free_reserved_range;
4899 up_write(&dmar_global_lock);
4900 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4902 #ifdef CONFIG_SWIOTLB
4905 dma_ops = &intel_dma_ops;
4907 init_iommu_pm_ops();
4909 for_each_active_iommu(iommu, drhd) {
4910 iommu_device_sysfs_add(&iommu->iommu, NULL,
4913 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4914 iommu_device_register(&iommu->iommu);
4917 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4918 bus_register_notifier(&pci_bus_type, &device_nb);
4919 if (si_domain && !hw_pass_through)
4920 register_memory_notifier(&intel_iommu_memory_nb);
4921 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4922 intel_iommu_cpu_dead);
4923 intel_iommu_enabled = 1;
4927 out_free_reserved_range:
4928 put_iova_domain(&reserved_iova_list);
4930 intel_iommu_free_dmars();
4931 up_write(&dmar_global_lock);
4932 iommu_exit_mempool();
static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct intel_iommu *iommu = opaque;

        domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
        return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
{
        if (!iommu || !dev || !dev_is_pci(dev))
                return;

        pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
}
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
        struct intel_iommu *iommu;
        unsigned long flags;

        assert_spin_locked(&device_domain_lock);

        if (WARN_ON(!info))
                return;

        iommu = info->iommu;

        if (info->dev) {
                iommu_disable_dev_iotlb(info);
                domain_context_clear(iommu, info->dev);
        }

        unlink_domain_info(info);

        spin_lock_irqsave(&iommu->lock, flags);
        domain_detach_iommu(info->domain, iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);

        free_devinfo_mem(info);
}

static void dmar_remove_one_dev_info(struct dmar_domain *domain,
                                     struct device *dev)
{
        struct device_domain_info *info;
        unsigned long flags;

        spin_lock_irqsave(&device_domain_lock, flags);
        info = dev->archdata.iommu;
        __dmar_remove_one_dev_info(info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
        int adjust_width;

        init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
                         DMA_32BIT_PFN);
        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        domain->agaw = width_to_agaw(adjust_width);

        domain->iommu_coherency = 0;
        domain->iommu_snooping = 0;
        domain->iommu_superpage = 0;
        domain->max_addr = 0;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
        return 0;
}
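
/*
 * Illustration only (not the driver's helpers): md_domain_init() feeds the
 * requested guest_width through guestwidth_to_adjustwidth() and
 * width_to_agaw().  The underlying arithmetic is that each page-table
 * level resolves LEVEL_STRIDE (9) address bits on top of the 30 bits a
 * 2-level table covers, so an adjusted width of 39 bits corresponds to
 * AGAW 1 (3-level table) and 48 bits to AGAW 2 (4-level table).  The real
 * helpers additionally round to a width the hardware advertises in its
 * SAGAW field, which this sketch ignores.
 */
static inline int example_width_to_agaw(int adjusted_width)
{
        /* 39 -> 1, 48 -> 2, 57 -> 3 */
        return DIV_ROUND_UP(adjusted_width - 30, LEVEL_STRIDE);
}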
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
        struct dmar_domain *dmar_domain;
        struct iommu_domain *domain;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
        if (!dmar_domain) {
                pr_err("Can't allocate dmar_domain\n");
                return NULL;
        }
        if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                pr_err("Domain initialization failed\n");
                domain_exit(dmar_domain);
                return NULL;
        }
        domain_update_iommu_cap(dmar_domain);

        domain = &dmar_domain->domain;
        domain->geometry.aperture_start = 0;
        domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
        domain->geometry.force_aperture = true;

        return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
        domain_exit(to_dmar_domain(domain));
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
                                     struct device *dev)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct intel_iommu *iommu;
        int addr_width;
        u8 bus, devfn;

        if (device_is_rmrr_locked(dev)) {
                dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
                return -EPERM;
        }

        /* normally dev is not mapped */
        if (unlikely(domain_context_mapped(dev))) {
                struct dmar_domain *old_domain;

                old_domain = find_domain(dev);
                if (old_domain) {
                        rcu_read_lock();
                        dmar_remove_one_dev_info(old_domain, dev);
                        rcu_read_unlock();

                        if (!domain_type_is_vm_or_si(old_domain) &&
                            list_empty(&old_domain->devices))
                                domain_exit(old_domain);
                }
        }

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        /* check if this iommu agaw is sufficient for max mapped address */
        addr_width = agaw_to_width(iommu->agaw);
        if (addr_width > cap_mgaw(iommu->cap))
                addr_width = cap_mgaw(iommu->cap);

        if (dmar_domain->max_addr > (1LL << addr_width)) {
                pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
                       __func__, addr_width, dmar_domain->max_addr);
                return -EFAULT;
        }
        dmar_domain->gaw = addr_width;

        /*
         * Knock out extra levels of page tables if necessary
         */
        while (iommu->agaw < dmar_domain->agaw) {
                struct dma_pte *pte;

                pte = dmar_domain->pgd;
                if (dma_pte_present(pte)) {
                        dmar_domain->pgd = (struct dma_pte *)
                                phys_to_virt(dma_pte_addr(pte));
                        free_pgtable_page(pte);
                }
                dmar_domain->agaw--;
        }

        return domain_add_dev_info(dmar_domain, dev);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
                           unsigned long iova, phys_addr_t hpa,
                           size_t size, int iommu_prot)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        u64 max_addr;
        int prot = 0;
        int ret;

        if (iommu_prot & IOMMU_READ)
                prot |= DMA_PTE_READ;
        if (iommu_prot & IOMMU_WRITE)
                prot |= DMA_PTE_WRITE;
        if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
                prot |= DMA_PTE_SNP;

        max_addr = iova + size;
        if (dmar_domain->max_addr < max_addr) {
                u64 end;

                /* check if minimum agaw is sufficient for mapped address */
                end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
                if (end < max_addr) {
                        pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
                               __func__, dmar_domain->gaw, max_addr);
                        return -EFAULT;
                }
                dmar_domain->max_addr = max_addr;
        }
        /* Round up size to next multiple of PAGE_SIZE, if it and
           the low bits of hpa would take us onto the next page */
        size = aligned_nrpages(hpa, size);
        ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
                                 hpa >> VTD_PAGE_SHIFT, size, prot);
        return ret;
}
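
/*
 * Illustration only, not the driver's helper: the rounding that
 * intel_iommu_map() relies on above.  aligned_nrpages() is expected to
 * return the number of VT-d pages needed once the sub-page offset of hpa
 * is folded back in; e.g. hpa = 0x1000ffc with size = 8 straddles a page
 * boundary and therefore needs two 4KiB pages even though size itself is
 * tiny.  A minimal sketch of that calculation:
 */
static inline unsigned long example_aligned_nrpages(unsigned long host_addr,
                                                    size_t size)
{
        host_addr &= ~PAGE_MASK;        /* keep only the in-page offset */
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}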
static size_t intel_iommu_unmap(struct iommu_domain *domain,
                                unsigned long iova, size_t size)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct page *freelist = NULL;
        struct intel_iommu *iommu;
        unsigned long start_pfn, last_pfn;
        unsigned int npages;
        int iommu_id, level = 0;

        /* Cope with horrid API which requires us to unmap more than the
           size argument if it happens to be a large-page mapping. */
        BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));

        if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
                size = VTD_PAGE_SIZE << level_to_offset_bits(level);

        start_pfn = iova >> VTD_PAGE_SHIFT;
        last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

        freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

        npages = last_pfn - start_pfn + 1;

        for_each_domain_iommu(iommu_id, dmar_domain) {
                iommu = g_iommus[iommu_id];

                iommu_flush_iotlb_psi(iommu, dmar_domain,
                                      start_pfn, npages, !freelist, 0);
        }

        dma_free_pagelist(freelist);

        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;

        return size;
}
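
/*
 * Worked example for the large-page rounding above (illustration only):
 * if the PTE found for the IOVA sits at level 2, i.e. it maps a 2MiB
 * superpage, level_to_offset_bits(2) is expected to evaluate to
 * (2 - 1) * LEVEL_STRIDE = 9, so the unmap size is rounded up to
 * VTD_PAGE_SIZE << 9 = 2MiB even if the caller only asked for 4KiB.
 */
static inline size_t example_min_unmap_size(int level)
{
        return (size_t)VTD_PAGE_SIZE << ((level - 1) * LEVEL_STRIDE);
}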
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
                                            dma_addr_t iova)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct dma_pte *pte;
        int level = 0;
        u64 phys = 0;

        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
        if (pte)
                phys = dma_pte_addr(pte);

        return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
        if (cap == IOMMU_CAP_CACHE_COHERENCY)
                return domain_update_iommu_snooping(NULL) == 1;
        if (cap == IOMMU_CAP_INTR_REMAP)
                return irq_remapping_enabled == 1;

        return false;
}
static int intel_iommu_add_device(struct device *dev)
{
        struct intel_iommu *iommu;
        struct iommu_group *group;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        iommu_device_link(&iommu->iommu, dev);

        group = iommu_group_get_for_dev(dev);

        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return;

        iommu_group_remove_device(dev);

        iommu_device_unlink(&iommu->iommu, dev);
}
static void intel_iommu_get_resv_regions(struct device *device,
                                         struct list_head *head)
{
        struct iommu_resv_region *reg;
        struct dmar_rmrr_unit *rmrr;
        struct device *i_dev;
        int i;

        rcu_read_lock();
        for_each_rmrr_units(rmrr) {
                for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
                                          i, i_dev) {
                        if (i_dev != device)
                                continue;

                        list_add_tail(&rmrr->resv->list, head);
                }
        }
        rcu_read_unlock();

        reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
                                      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
                                      0, IOMMU_RESV_MSI);
        if (!reg)
                return;
        list_add_tail(&reg->list, head);
}

static void intel_iommu_put_resv_regions(struct device *dev,
                                         struct list_head *head)
{
        struct iommu_resv_region *entry, *next;

        list_for_each_entry_safe(entry, next, head, list) {
                if (entry->type == IOMMU_RESV_RESERVED)
                        kfree(entry);
        }
}
#ifdef CONFIG_INTEL_IOMMU_SVM
#define MAX_NR_PASID_BITS (20)
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
{
        /*
         * Convert ecap_pss to extend context entry pts encoding, also
         * respect the soft pasid_max value set by the iommu.
         * - number of PASID bits = ecap_pss + 1
         * - number of PASID table entries = 2^(pts + 5)
         * Therefore, pts = ecap_pss - 4
         * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
         */
        if (ecap_pss(iommu->ecap) < 5)
                return 0;

        /* pasid_max is encoded as actual number of entries not the bits */
        return find_first_bit((unsigned long *)&iommu->pasid_max,
                              MAX_NR_PASID_BITS) - 5;
}
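
/*
 * Worked example of the encoding above (illustration only, not driver
 * code): with ecap_pss = 0x13 the hardware supports 0x13 + 1 = 20 PASID
 * bits, i.e. a table of 1 << 20 entries.  If the soft limit
 * iommu->pasid_max is also 1 << 20, find_first_bit() on it returns 20 and
 * intel_iommu_get_pts() yields pts = 20 - 5 = 15, which matches
 * 2^(pts + 5) = 2^20 table entries.
 */
static inline unsigned long example_pts_for_entries(unsigned long nr_entries)
{
        /* assumes nr_entries is a power of two, e.g. 1 << 20 -> 15 */
        return __ffs(nr_entries) - 5;
}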
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
        struct device_domain_info *info;
        struct context_entry *context;
        struct dmar_domain *domain;
        unsigned long flags;
        u64 ctx_lo;
        int ret;

        domain = get_valid_domain_for_dev(sdev->dev);
        if (!domain)
                return -EINVAL;

        spin_lock_irqsave(&device_domain_lock, flags);
        spin_lock(&iommu->lock);

        ret = -EINVAL;
        info = sdev->dev->archdata.iommu;
        if (!info || !info->pasid_supported)
                goto out;

        context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
        if (WARN_ON(!context))
                goto out;

        ctx_lo = context[0].lo;

        sdev->did = domain->iommu_did[iommu->seq_id];
        sdev->sid = PCI_DEVID(info->bus, info->devfn);

        if (!(ctx_lo & CONTEXT_PASIDE)) {
                context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
                context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
                        intel_iommu_get_pts(iommu);

                wmb();
                /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
                 * extended to permit requests-with-PASID if the PASIDE bit
                 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
                 * however, the PASIDE bit is ignored and requests-with-PASID
                 * are unconditionally blocked, which makes less sense.
                 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
                 * "guest mode" translation types depending on whether ATS
                 * is available or not. Annoyingly, we can't use the new
                 * modes *unless* PASIDE is set. */
                if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
                        ctx_lo &= ~CONTEXT_TT_MASK;
                        if (info->ats_supported)
                                ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
                        else
                                ctx_lo |= CONTEXT_TT_PT_PASID << 2;
                }
                ctx_lo |= CONTEXT_PASIDE;
                if (iommu->pasid_state_table)
                        ctx_lo |= CONTEXT_DINVE;
                if (info->pri_supported)
                        ctx_lo |= CONTEXT_PRS;
                context[0].lo = ctx_lo;
                wmb();
                iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
        }

        /* Enable PASID support in the device, if it wasn't already */
        if (!info->pasid_enabled)
                iommu_enable_dev_iotlb(info);

        if (info->ats_enabled) {
                sdev->dev_iotlb = 1;
                sdev->qdep = info->ats_qdep;
                if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
                        sdev->qdep = 0;
        }
        ret = 0;

out:
        spin_unlock(&iommu->lock);
        spin_unlock_irqrestore(&device_domain_lock, flags);

        return ret;
}
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;

        if (iommu_dummy(dev)) {
                dev_warn(dev,
                         "No IOMMU translation for device; cannot enable SVM\n");
                return NULL;
        }

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu) {
                dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
                return NULL;
        }

        if (!iommu->pasid_table) {
                dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
                return NULL;
        }

        return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
const struct iommu_ops intel_iommu_ops = {
        .capable                = intel_iommu_capable,
        .domain_alloc           = intel_iommu_domain_alloc,
        .domain_free            = intel_iommu_domain_free,
        .attach_dev             = intel_iommu_attach_device,
        .detach_dev             = intel_iommu_detach_device,
        .map                    = intel_iommu_map,
        .unmap                  = intel_iommu_unmap,
        .map_sg                 = default_iommu_map_sg,
        .iova_to_phys           = intel_iommu_iova_to_phys,
        .add_device             = intel_iommu_add_device,
        .remove_device          = intel_iommu_remove_device,
        .get_resv_regions       = intel_iommu_get_resv_regions,
        .put_resv_regions       = intel_iommu_put_resv_regions,
        .device_group           = pci_device_group,
        .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
};
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
        /* G4x/GM45 integrated gfx dmar support is totally busted. */
        pr_info("Disabling IOMMU for graphics on this chipset\n");
        dmar_map_gfx = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
        /*
         * Mobile 4 Series Chipset neglects to set RWBF capability,
         * but needs it. Same seems to hold for the desktop versions.
         */
        pr_info("Forcing write-buffer flush capability\n");
        rwbf_quirk = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK    (0xf << 8)
#define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
#define GGC_MEMORY_SIZE_1M      (0x1 << 8)
#define GGC_MEMORY_SIZE_2M      (0x3 << 8)
#define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
        unsigned short ggc;

        if (pci_read_config_word(dev, GGC, &ggc))
                return;

        if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
                pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
                dmar_map_gfx = 0;
        } else if (dmar_map_gfx) {
                /* we have to ensure the gfx device is idle before we flush */
                pr_info("Disabling batched IOTLB flush on Ironlake\n");
                intel_iommu_strict = 1;
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
        struct pci_dev *pdev;
        uint32_t vtisochctrl;

        /* If there's no Azalia in the system anyway, forget it. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
        if (!pdev)
                return;
        pci_dev_put(pdev);

        /* System Management Registers. Might be hidden, in which case
           we can't do the sanity check. But that's OK, because the
           known-broken BIOSes _don't_ actually hide it, so far. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
        if (!pdev)
                return;

        if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
                pci_dev_put(pdev);
                return;
        }

        pci_dev_put(pdev);

        /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
        if (vtisochctrl & 1)
                return;

        /* Drop all bits other than the number of TLB entries */
        vtisochctrl &= 0x1c;

        /* If we have the recommended number of TLB entries (16), fine. */
        if (vtisochctrl == 0x10)
                return;

        /* Zero TLB entries? You get to ride the short bus to school. */
        if (!vtisochctrl) {
                WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                iommu_identity_mapping |= IDENTMAP_AZALIA;
                return;
        }

        pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
                vtisochctrl);
}
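
/*
 * Reading of the magic numbers above (illustration only, not driver code):
 * bit 0 of the VTISOCHCTRL value read from offset 0x188 being set means
 * Azalia DMA is routed to the non-isoch DMAR unit, which is the benign
 * case.  The 0x1c mask keeps the field that reports how many TLB entries
 * the BIOS assigned to the isoch unit, and that masked value is used
 * directly as the entry count: 0x10 is the recommended 16 entries, while 0
 * is the broken "no TLB space" configuration warned about above.
 */
static inline unsigned int example_isoch_tlb_entries(u32 vtisochctrl)
{
        return vtisochctrl & 0x1c;      /* e.g. 0x10 -> 16 entries */
}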