1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  *          Joerg Roedel <jroedel@suse.de>
19  */
20
21 #define pr_fmt(fmt)     "DMAR: " fmt
22 #define dev_fmt(fmt)    pr_fmt(fmt)
23
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/export.h>
28 #include <linux/slab.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/spinlock.h>
32 #include <linux/pci.h>
33 #include <linux/dmar.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/mempool.h>
36 #include <linux/memory.h>
37 #include <linux/cpu.h>
38 #include <linux/timer.h>
39 #include <linux/io.h>
40 #include <linux/iova.h>
41 #include <linux/iommu.h>
42 #include <linux/intel-iommu.h>
43 #include <linux/syscore_ops.h>
44 #include <linux/tboot.h>
45 #include <linux/dmi.h>
46 #include <linux/pci-ats.h>
47 #include <linux/memblock.h>
48 #include <linux/dma-contiguous.h>
49 #include <linux/dma-direct.h>
50 #include <linux/crash_dump.h>
51 #include <linux/numa.h>
52 #include <asm/irq_remapping.h>
53 #include <asm/cacheflush.h>
54 #include <asm/iommu.h>
55
56 #include "irq_remapping.h"
57 #include "intel-pasid.h"
58
59 #define ROOT_SIZE               VTD_PAGE_SIZE
60 #define CONTEXT_SIZE            VTD_PAGE_SIZE
61
62 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
63 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
64 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
65 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
66
67 #define IOAPIC_RANGE_START      (0xfee00000)
68 #define IOAPIC_RANGE_END        (0xfeefffff)
69 #define IOVA_START_ADDR         (0x1000)
70
71 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
72
73 #define MAX_AGAW_WIDTH 64
74 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
75
76 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
77 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
78
79 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
80    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
81 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
82                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
83 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
84
85 /* IO virtual address start page frame number */
86 #define IOVA_START_PFN          (1)
87
88 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
89
90 /* page table handling */
91 #define LEVEL_STRIDE            (9)
92 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
93
94 /*
 95  * This bitmap is used to advertise the page sizes our hardware supports
96  * to the IOMMU core, which will then use this information to split
97  * physically contiguous memory regions it is mapping into page sizes
98  * that we support.
99  *
100  * Traditionally the IOMMU core just handed us the mappings directly,
101  * after making sure the size is an order of a 4KiB page and that the
102  * mapping has natural alignment.
103  *
104  * To retain this behavior, we currently advertise that we support
105  * all page sizes that are an order of 4KiB.
106  *
107  * If at some point we'd like to utilize the IOMMU core's new behavior,
108  * we could change this to advertise the real page sizes we support.
109  */
110 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
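
/*
 * Worked example (illustration only, not part of the driver logic):
 * ~0xFFFUL leaves bits 0-11 clear and bits 12-63 set, so every
 * power-of-two size from 4KiB upwards is advertised, e.g.:
 *
 *	INTEL_IOMMU_PGSIZES & (1UL << 12)	-> non-zero (4KiB)
 *	INTEL_IOMMU_PGSIZES & (1UL << 21)	-> non-zero (2MiB)
 *	INTEL_IOMMU_PGSIZES & (1UL << 30)	-> non-zero (1GiB)
 */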
111
112 static inline int agaw_to_level(int agaw)
113 {
114         return agaw + 2;
115 }
116
117 static inline int agaw_to_width(int agaw)
118 {
119         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
120 }
121
122 static inline int width_to_agaw(int width)
123 {
124         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
125 }
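
/*
 * Worked example (illustration only) of the three helpers above:
 *
 *	width_to_agaw(48) == DIV_ROUND_UP(48 - 30, 9) == 2
 *	agaw_to_level(2)  == 4			(4-level page table)
 *	agaw_to_width(2)  == 30 + 2 * 9 == 48
 *
 * and with DEFAULT_DOMAIN_ADDRESS_WIDTH == 57:
 *
 *	width_to_agaw(57) == 3, agaw_to_level(3) == 5	(5-level table)
 */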
126
127 static inline unsigned int level_to_offset_bits(int level)
128 {
129         return (level - 1) * LEVEL_STRIDE;
130 }
131
132 static inline int pfn_level_offset(unsigned long pfn, int level)
133 {
134         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
135 }
136
137 static inline unsigned long level_mask(int level)
138 {
139         return -1UL << level_to_offset_bits(level);
140 }
141
142 static inline unsigned long level_size(int level)
143 {
144         return 1UL << level_to_offset_bits(level);
145 }
146
147 static inline unsigned long align_to_level(unsigned long pfn, int level)
148 {
149         return (pfn + level_size(level) - 1) & level_mask(level);
150 }
151
152 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
153 {
154         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
155 }
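
/*
 * Worked example (illustration only) of the level arithmetic above:
 *
 *	level_to_offset_bits(1) == 0,  level_size(1) == 1   page  (4KiB)
 *	level_to_offset_bits(2) == 9,  level_size(2) == 512 pages (2MiB)
 *	pfn_level_offset(pfn, 2) == (pfn >> 9) & 0x1ff
 *	lvl_to_nr_pages(2) == 512
 */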
156
157 /* VT-d pages must never be larger than MM pages. Otherwise things
158    are never going to work. */
159 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
160 {
161         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
162 }
163
164 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
165 {
166         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
167 }
168 static inline unsigned long page_to_dma_pfn(struct page *pg)
169 {
170         return mm_to_dma_pfn(page_to_pfn(pg));
171 }
172 static inline unsigned long virt_to_dma_pfn(void *p)
173 {
174         return page_to_dma_pfn(virt_to_page(p));
175 }
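
/*
 * Illustration only: with 4KiB MM pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12,
 * the usual x86 case) these conversions are the identity. Hypothetically,
 * with 64KiB MM pages (PAGE_SHIFT == 16):
 *
 *	mm_to_dma_pfn(1)  == 1 << 4  == 16
 *	dma_to_mm_pfn(16) == 16 >> 4 == 1
 *
 * i.e. one MM page spans sixteen VT-d pages.
 */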
176
177 /* global iommu list, set NULL for ignored DMAR units */
178 static struct intel_iommu **g_iommus;
179
180 static void __init check_tylersburg_isoch(void);
181 static int rwbf_quirk;
182
183 /*
184  * set to 1 to panic the kernel if VT-d cannot be successfully enabled
185  * (used when the kernel is launched w/ TXT)
186  */
187 static int force_on = 0;
188 int intel_iommu_tboot_noforce;
189 static int no_platform_optin;
190
191 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
192
193 /*
194  * Take a root_entry and return the Lower Context Table Pointer (LCTP)
195  * if marked present.
196  */
197 static phys_addr_t root_entry_lctp(struct root_entry *re)
198 {
199         if (!(re->lo & 1))
200                 return 0;
201
202         return re->lo & VTD_PAGE_MASK;
203 }
204
205 /*
206  * Take a root_entry and return the Upper Context Table Pointer (UCTP)
207  * if marked present.
208  */
209 static phys_addr_t root_entry_uctp(struct root_entry *re)
210 {
211         if (!(re->hi & 1))
212                 return 0;
213
214         return re->hi & VTD_PAGE_MASK;
215 }
216
217 static inline void context_clear_pasid_enable(struct context_entry *context)
218 {
219         context->lo &= ~(1ULL << 11);
220 }
221
222 static inline bool context_pasid_enabled(struct context_entry *context)
223 {
224         return !!(context->lo & (1ULL << 11));
225 }
226
227 static inline void context_set_copied(struct context_entry *context)
228 {
229         context->hi |= (1ull << 3);
230 }
231
232 static inline bool context_copied(struct context_entry *context)
233 {
234         return !!(context->hi & (1ULL << 3));
235 }
236
237 static inline bool __context_present(struct context_entry *context)
238 {
239         return (context->lo & 1);
240 }
241
242 bool context_present(struct context_entry *context)
243 {
244         return context_pasid_enabled(context) ?
245              __context_present(context) :
246              __context_present(context) && !context_copied(context);
247 }
248
249 static inline void context_set_present(struct context_entry *context)
250 {
251         context->lo |= 1;
252 }
253
254 static inline void context_set_fault_enable(struct context_entry *context)
255 {
256         context->lo &= (((u64)-1) << 2) | 1;
257 }
258
259 static inline void context_set_translation_type(struct context_entry *context,
260                                                 unsigned long value)
261 {
262         context->lo &= (((u64)-1) << 4) | 3;
263         context->lo |= (value & 3) << 2;
264 }
265
266 static inline void context_set_address_root(struct context_entry *context,
267                                             unsigned long value)
268 {
269         context->lo &= ~VTD_PAGE_MASK;
270         context->lo |= value & VTD_PAGE_MASK;
271 }
272
273 static inline void context_set_address_width(struct context_entry *context,
274                                              unsigned long value)
275 {
276         context->hi |= value & 7;
277 }
278
279 static inline void context_set_domain_id(struct context_entry *context,
280                                          unsigned long value)
281 {
282         context->hi |= (value & ((1 << 16) - 1)) << 8;
283 }
284
285 static inline int context_domain_id(struct context_entry *c)
286 {
287         return((c->hi >> 8) & 0xffff);
288 }
289
290 static inline void context_clear_entry(struct context_entry *context)
291 {
292         context->lo = 0;
293         context->hi = 0;
294 }
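
/*
 * Sketch (illustration only, not the exact sequence used elsewhere in this
 * file): a legacy-mode context entry is typically composed from the
 * helpers above, roughly:
 *
 *	context_clear_entry(context);
 *	context_set_domain_id(context, did);
 *	context_set_address_width(context, agaw);
 *	context_set_address_root(context, virt_to_phys(pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */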
295
296 /*
297  * This domain is a static identity mapping domain.
298  *      1. This domain creates a static 1:1 mapping to all usable memory.
299  *      2. It maps to each iommu if successful.
300  *      3. Each iommu maps to this domain if successful.
301  */
302 static struct dmar_domain *si_domain;
303 static int hw_pass_through = 1;
304
305 /*
306  * Domain represents a virtual machine; more than one device across
307  * iommus may be owned by one domain, e.g. a kvm guest.
308  */
309 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)
310
311 /* si_domain contains multiple devices */
312 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)
313
314 #define for_each_domain_iommu(idx, domain)                      \
315         for (idx = 0; idx < g_num_of_iommus; idx++)             \
316                 if (domain->iommu_refcnt[idx])
317
318 struct dmar_rmrr_unit {
319         struct list_head list;          /* list of rmrr units   */
320         struct acpi_dmar_header *hdr;   /* ACPI header          */
321         u64     base_address;           /* reserved base address*/
322         u64     end_address;            /* reserved end address */
323         struct dmar_dev_scope *devices; /* target devices */
324         int     devices_cnt;            /* target device count */
325         struct iommu_resv_region *resv; /* reserved region handle */
326 };
327
328 struct dmar_atsr_unit {
329         struct list_head list;          /* list of ATSR units */
330         struct acpi_dmar_header *hdr;   /* ACPI header */
331         struct dmar_dev_scope *devices; /* target devices */
332         int devices_cnt;                /* target device count */
333         u8 include_all:1;               /* include all ports */
334 };
335
336 static LIST_HEAD(dmar_atsr_units);
337 static LIST_HEAD(dmar_rmrr_units);
338
339 #define for_each_rmrr_units(rmrr) \
340         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
341
342 /* number of registered IOMMUs; used to size and index g_iommus */
343 static int g_num_of_iommus;
344
345 static void domain_exit(struct dmar_domain *domain);
346 static void domain_remove_dev_info(struct dmar_domain *domain);
347 static void dmar_remove_one_dev_info(struct device *dev);
348 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
349 static void domain_context_clear(struct intel_iommu *iommu,
350                                  struct device *dev);
351 static int domain_detach_iommu(struct dmar_domain *domain,
352                                struct intel_iommu *iommu);
353
354 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
355 int dmar_disabled = 0;
356 #else
357 int dmar_disabled = 1;
358 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
359
360 int intel_iommu_enabled = 0;
361 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
362
363 static int dmar_map_gfx = 1;
364 static int dmar_forcedac;
365 static int intel_iommu_strict;
366 static int intel_iommu_superpage = 1;
367 static int intel_iommu_sm;
368 static int iommu_identity_mapping;
369
370 #define IDENTMAP_ALL            1
371 #define IDENTMAP_GFX            2
372 #define IDENTMAP_AZALIA         4
373
374 #define sm_supported(iommu)     (intel_iommu_sm && ecap_smts((iommu)->ecap))
375 #define pasid_supported(iommu)  (sm_supported(iommu) &&                 \
376                                  ecap_pasid((iommu)->ecap))
377
378 int intel_iommu_gfx_mapped;
379 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
380
381 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
382 static DEFINE_SPINLOCK(device_domain_lock);
383 static LIST_HEAD(device_domain_list);
384
385 /*
386  * Iterate over elements in device_domain_list and call the specified
387  * callback @fn against each element.
388  */
389 int for_each_device_domain(int (*fn)(struct device_domain_info *info,
390                                      void *data), void *data)
391 {
392         int ret = 0;
393         unsigned long flags;
394         struct device_domain_info *info;
395
396         spin_lock_irqsave(&device_domain_lock, flags);
397         list_for_each_entry(info, &device_domain_list, global) {
398                 ret = fn(info, data);
399                 if (ret) {
400                         spin_unlock_irqrestore(&device_domain_lock, flags);
401                         return ret;
402                 }
403         }
404         spin_unlock_irqrestore(&device_domain_lock, flags);
405
406         return 0;
407 }
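
/*
 * Usage sketch for for_each_device_domain() (the callback below is
 * hypothetical, for illustration only). A non-zero return from the
 * callback stops the walk and is propagated to the caller:
 *
 *	static int has_dev(struct device_domain_info *info, void *data)
 *	{
 *		return info->dev == data;
 *	}
 *
 *	ret = for_each_device_domain(has_dev, dev);
 */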
408
409 const struct iommu_ops intel_iommu_ops;
410
411 static bool translation_pre_enabled(struct intel_iommu *iommu)
412 {
413         return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
414 }
415
416 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
417 {
418         iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
419 }
420
421 static void init_translation_status(struct intel_iommu *iommu)
422 {
423         u32 gsts;
424
425         gsts = readl(iommu->reg + DMAR_GSTS_REG);
426         if (gsts & DMA_GSTS_TES)
427                 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
428 }
429
430 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
431 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
432 {
433         return container_of(dom, struct dmar_domain, domain);
434 }
435
436 static int __init intel_iommu_setup(char *str)
437 {
438         if (!str)
439                 return -EINVAL;
440         while (*str) {
441                 if (!strncmp(str, "on", 2)) {
442                         dmar_disabled = 0;
443                         pr_info("IOMMU enabled\n");
444                 } else if (!strncmp(str, "off", 3)) {
445                         dmar_disabled = 1;
446                         no_platform_optin = 1;
447                         pr_info("IOMMU disabled\n");
448                 } else if (!strncmp(str, "igfx_off", 8)) {
449                         dmar_map_gfx = 0;
450                         pr_info("Disable GFX device mapping\n");
451                 } else if (!strncmp(str, "forcedac", 8)) {
452                         pr_info("Forcing DAC for PCI devices\n");
453                         dmar_forcedac = 1;
454                 } else if (!strncmp(str, "strict", 6)) {
455                         pr_info("Disable batched IOTLB flush\n");
456                         intel_iommu_strict = 1;
457                 } else if (!strncmp(str, "sp_off", 6)) {
458                         pr_info("Disable supported super page\n");
459                         intel_iommu_superpage = 0;
460                 } else if (!strncmp(str, "sm_on", 5)) {
461                         pr_info("Intel-IOMMU: scalable mode supported\n");
462                         intel_iommu_sm = 1;
463                 } else if (!strncmp(str, "tboot_noforce", 13)) {
464                         printk(KERN_INFO
465                                 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
466                         intel_iommu_tboot_noforce = 1;
467                 }
468
469                 str += strcspn(str, ",");
470                 while (*str == ',')
471                         str++;
472         }
473         return 0;
474 }
475 __setup("intel_iommu=", intel_iommu_setup);
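
/*
 * Example kernel command lines accepted by the parser above (options are
 * comma separated; illustration only):
 *
 *	intel_iommu=on
 *	intel_iommu=on,sm_on,strict
 *	intel_iommu=on,igfx_off,sp_off
 *	intel_iommu=off
 */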
476
477 static struct kmem_cache *iommu_domain_cache;
478 static struct kmem_cache *iommu_devinfo_cache;
479
480 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
481 {
482         struct dmar_domain **domains;
483         int idx = did >> 8;
484
485         domains = iommu->domains[idx];
486         if (!domains)
487                 return NULL;
488
489         return domains[did & 0xff];
490 }
491
492 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
493                              struct dmar_domain *domain)
494 {
495         struct dmar_domain **domains;
496         int idx = did >> 8;
497
498         if (!iommu->domains[idx]) {
499                 size_t size = 256 * sizeof(struct dmar_domain *);
500                 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
501         }
502
503         domains = iommu->domains[idx];
504         if (WARN_ON(!domains))
505                 return;
506         else
507                 domains[did & 0xff] = domain;
508 }
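
/*
 * Illustration only: domain IDs index a two-level table of 256-entry
 * chunks, so e.g. did 0x1234 lives in iommu->domains[0x12][0x34]:
 *
 *	idx  == 0x1234 >> 8   == 0x12
 *	slot == 0x1234 & 0xff == 0x34
 */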
509
510 void *alloc_pgtable_page(int node)
511 {
512         struct page *page;
513         void *vaddr = NULL;
514
515         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
516         if (page)
517                 vaddr = page_address(page);
518         return vaddr;
519 }
520
521 void free_pgtable_page(void *vaddr)
522 {
523         free_page((unsigned long)vaddr);
524 }
525
526 static inline void *alloc_domain_mem(void)
527 {
528         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
529 }
530
531 static void free_domain_mem(void *vaddr)
532 {
533         kmem_cache_free(iommu_domain_cache, vaddr);
534 }
535
536 static inline void * alloc_devinfo_mem(void)
537 {
538         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
539 }
540
541 static inline void free_devinfo_mem(void *vaddr)
542 {
543         kmem_cache_free(iommu_devinfo_cache, vaddr);
544 }
545
546 static inline int domain_type_is_vm(struct dmar_domain *domain)
547 {
548         return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
549 }
550
551 static inline int domain_type_is_si(struct dmar_domain *domain)
552 {
553         return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
554 }
555
556 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
557 {
558         return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
559                                 DOMAIN_FLAG_STATIC_IDENTITY);
560 }
561
562 static inline int domain_pfn_supported(struct dmar_domain *domain,
563                                        unsigned long pfn)
564 {
565         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
566
567         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
568 }
569
570 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
571 {
572         unsigned long sagaw;
573         int agaw = -1;
574
575         sagaw = cap_sagaw(iommu->cap);
576         for (agaw = width_to_agaw(max_gaw);
577              agaw >= 0; agaw--) {
578                 if (test_bit(agaw, &sagaw))
579                         break;
580         }
581
582         return agaw;
583 }
584
585 /*
586  * Calculate max SAGAW for each iommu.
587  */
588 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
589 {
590         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
591 }
592
593 /*
594  * Calculate agaw for each iommu.
595  * "SAGAW" may differ across iommus; use a default agaw, and fall back to
596  * a smaller supported agaw for iommus that don't support the default.
597  */
598 int iommu_calculate_agaw(struct intel_iommu *iommu)
599 {
600         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
601 }
602
603 /* This function only returns a single iommu in a domain */
604 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
605 {
606         int iommu_id;
607
608         /* si_domain and vm domain should not get here. */
609         BUG_ON(domain_type_is_vm_or_si(domain));
610         for_each_domain_iommu(iommu_id, domain)
611                 break;
612
613         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
614                 return NULL;
615
616         return g_iommus[iommu_id];
617 }
618
619 static void domain_update_iommu_coherency(struct dmar_domain *domain)
620 {
621         struct dmar_drhd_unit *drhd;
622         struct intel_iommu *iommu;
623         bool found = false;
624         int i;
625
626         domain->iommu_coherency = 1;
627
628         for_each_domain_iommu(i, domain) {
629                 found = true;
630                 if (!ecap_coherent(g_iommus[i]->ecap)) {
631                         domain->iommu_coherency = 0;
632                         break;
633                 }
634         }
635         if (found)
636                 return;
637
638         /* No hardware attached; use lowest common denominator */
639         rcu_read_lock();
640         for_each_active_iommu(iommu, drhd) {
641                 if (!ecap_coherent(iommu->ecap)) {
642                         domain->iommu_coherency = 0;
643                         break;
644                 }
645         }
646         rcu_read_unlock();
647 }
648
649 static int domain_update_iommu_snooping(struct intel_iommu *skip)
650 {
651         struct dmar_drhd_unit *drhd;
652         struct intel_iommu *iommu;
653         int ret = 1;
654
655         rcu_read_lock();
656         for_each_active_iommu(iommu, drhd) {
657                 if (iommu != skip) {
658                         if (!ecap_sc_support(iommu->ecap)) {
659                                 ret = 0;
660                                 break;
661                         }
662                 }
663         }
664         rcu_read_unlock();
665
666         return ret;
667 }
668
669 static int domain_update_iommu_superpage(struct intel_iommu *skip)
670 {
671         struct dmar_drhd_unit *drhd;
672         struct intel_iommu *iommu;
673         int mask = 0xf;
674
675         if (!intel_iommu_superpage) {
676                 return 0;
677         }
678
679         /* set iommu_superpage to the smallest common denominator */
680         rcu_read_lock();
681         for_each_active_iommu(iommu, drhd) {
682                 if (iommu != skip) {
683                         mask &= cap_super_page_val(iommu->cap);
684                         if (!mask)
685                                 break;
686                 }
687         }
688         rcu_read_unlock();
689
690         return fls(mask);
691 }
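
/*
 * Illustration only: the mask is the AND of cap_super_page_val() across
 * all active IOMMUs (minus @skip), so e.g.:
 *
 *	superpages disabled, or mask == 0x0  ->  returns 0 (no superpages)
 *	mask == 0x1                          ->  fls(0x1) == 1 superpage level
 *	mask == 0x3                          ->  fls(0x3) == 2 superpage levels
 */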
692
693 /* Some capabilities may be different across iommus */
694 static void domain_update_iommu_cap(struct dmar_domain *domain)
695 {
696         domain_update_iommu_coherency(domain);
697         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
698         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
699 }
700
701 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
702                                          u8 devfn, int alloc)
703 {
704         struct root_entry *root = &iommu->root_entry[bus];
705         struct context_entry *context;
706         u64 *entry;
707
708         entry = &root->lo;
709         if (sm_supported(iommu)) {
710                 if (devfn >= 0x80) {
711                         devfn -= 0x80;
712                         entry = &root->hi;
713                 }
714                 devfn *= 2;
715         }
716         if (*entry & 1)
717                 context = phys_to_virt(*entry & VTD_PAGE_MASK);
718         else {
719                 unsigned long phy_addr;
720                 if (!alloc)
721                         return NULL;
722
723                 context = alloc_pgtable_page(iommu->node);
724                 if (!context)
725                         return NULL;
726
727                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
728                 phy_addr = virt_to_phys((void *)context);
729                 *entry = phy_addr | 1;
730                 __iommu_flush_cache(iommu, entry, sizeof(*entry));
731         }
732         return &context[devfn];
733 }
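
/*
 * Illustration only: in legacy mode, root->lo points to one 256-entry
 * context table indexed directly by devfn. In scalable mode each half of
 * the root entry covers 128 devfns, and a scalable-mode context entry is
 * twice the size of a legacy one, hence the devfn *= 2 above, e.g.:
 *
 *	devfn 0x05  ->  root->lo, context index 0x05 * 2 == 0x0a
 *	devfn 0x85  ->  root->hi, context index (0x85 - 0x80) * 2 == 0x0a
 */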
734
735 static int iommu_dummy(struct device *dev)
736 {
737         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
738 }
739
740 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
741 {
742         struct dmar_drhd_unit *drhd = NULL;
743         struct intel_iommu *iommu;
744         struct device *tmp;
745         struct pci_dev *ptmp, *pdev = NULL;
746         u16 segment = 0;
747         int i;
748
749         if (iommu_dummy(dev))
750                 return NULL;
751
752         if (dev_is_pci(dev)) {
753                 struct pci_dev *pf_pdev;
754
755                 pdev = to_pci_dev(dev);
756
757 #ifdef CONFIG_X86
758                 /* VMD child devices currently cannot be handled individually */
759                 if (is_vmd(pdev->bus))
760                         return NULL;
761 #endif
762
763                 /* VFs aren't listed in scope tables; we need to look up
764                  * the PF instead to find the IOMMU. */
765                 pf_pdev = pci_physfn(pdev);
766                 dev = &pf_pdev->dev;
767                 segment = pci_domain_nr(pdev->bus);
768         } else if (has_acpi_companion(dev))
769                 dev = &ACPI_COMPANION(dev)->dev;
770
771         rcu_read_lock();
772         for_each_active_iommu(iommu, drhd) {
773                 if (pdev && segment != drhd->segment)
774                         continue;
775
776                 for_each_active_dev_scope(drhd->devices,
777                                           drhd->devices_cnt, i, tmp) {
778                         if (tmp == dev) {
779                                 /* For a VF use its original BDF# not that of the PF
780                                  * which we used for the IOMMU lookup. Strictly speaking
781                                  * we could do this for all PCI devices; we only need to
782                                  * get the BDF# from the scope table for ACPI matches. */
783                                 if (pdev && pdev->is_virtfn)
784                                         goto got_pdev;
785
786                                 *bus = drhd->devices[i].bus;
787                                 *devfn = drhd->devices[i].devfn;
788                                 goto out;
789                         }
790
791                         if (!pdev || !dev_is_pci(tmp))
792                                 continue;
793
794                         ptmp = to_pci_dev(tmp);
795                         if (ptmp->subordinate &&
796                             ptmp->subordinate->number <= pdev->bus->number &&
797                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
798                                 goto got_pdev;
799                 }
800
801                 if (pdev && drhd->include_all) {
802                 got_pdev:
803                         *bus = pdev->bus->number;
804                         *devfn = pdev->devfn;
805                         goto out;
806                 }
807         }
808         iommu = NULL;
809  out:
810         rcu_read_unlock();
811
812         return iommu;
813 }
814
815 static void domain_flush_cache(struct dmar_domain *domain,
816                                void *addr, int size)
817 {
818         if (!domain->iommu_coherency)
819                 clflush_cache_range(addr, size);
820 }
821
822 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
823 {
824         struct context_entry *context;
825         int ret = 0;
826         unsigned long flags;
827
828         spin_lock_irqsave(&iommu->lock, flags);
829         context = iommu_context_addr(iommu, bus, devfn, 0);
830         if (context)
831                 ret = context_present(context);
832         spin_unlock_irqrestore(&iommu->lock, flags);
833         return ret;
834 }
835
836 static void free_context_table(struct intel_iommu *iommu)
837 {
838         int i;
839         unsigned long flags;
840         struct context_entry *context;
841
842         spin_lock_irqsave(&iommu->lock, flags);
843         if (!iommu->root_entry) {
844                 goto out;
845         }
846         for (i = 0; i < ROOT_ENTRY_NR; i++) {
847                 context = iommu_context_addr(iommu, i, 0, 0);
848                 if (context)
849                         free_pgtable_page(context);
850
851                 if (!sm_supported(iommu))
852                         continue;
853
854                 context = iommu_context_addr(iommu, i, 0x80, 0);
855                 if (context)
856                         free_pgtable_page(context);
857
858         }
859         free_pgtable_page(iommu->root_entry);
860         iommu->root_entry = NULL;
861 out:
862         spin_unlock_irqrestore(&iommu->lock, flags);
863 }
864
865 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
866                                       unsigned long pfn, int *target_level)
867 {
868         struct dma_pte *parent, *pte;
869         int level = agaw_to_level(domain->agaw);
870         int offset;
871
872         BUG_ON(!domain->pgd);
873
874         if (!domain_pfn_supported(domain, pfn))
875                 /* Address beyond IOMMU's addressing capabilities. */
876                 return NULL;
877
878         parent = domain->pgd;
879
880         while (1) {
881                 void *tmp_page;
882
883                 offset = pfn_level_offset(pfn, level);
884                 pte = &parent[offset];
885                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
886                         break;
887                 if (level == *target_level)
888                         break;
889
890                 if (!dma_pte_present(pte)) {
891                         uint64_t pteval;
892
893                         tmp_page = alloc_pgtable_page(domain->nid);
894
895                         if (!tmp_page)
896                                 return NULL;
897
898                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
899                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
900                         if (cmpxchg64(&pte->val, 0ULL, pteval))
901                                 /* Someone else set it while we were thinking; use theirs. */
902                                 free_pgtable_page(tmp_page);
903                         else
904                                 domain_flush_cache(domain, pte, sizeof(*pte));
905                 }
906                 if (level == 1)
907                         break;
908
909                 parent = phys_to_virt(dma_pte_addr(pte));
910                 level--;
911         }
912
913         if (!*target_level)
914                 *target_level = level;
915
916         return pte;
917 }
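
/*
 * Illustration only: the value passed via *target_level chooses how deep
 * the walk above goes:
 *
 *	level = 0: stop at whatever leaf (or non-present hole) exists and
 *		   report the level reached back through *target_level
 *	level = 1: walk (allocating missing tables) down to the 4KiB PTE
 *	level = 2: stop at the 2MiB level, as used for superpage mappings
 */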
918
919
920 /* return the pte for an address at a specific level */
921 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
922                                          unsigned long pfn,
923                                          int level, int *large_page)
924 {
925         struct dma_pte *parent, *pte;
926         int total = agaw_to_level(domain->agaw);
927         int offset;
928
929         parent = domain->pgd;
930         while (level <= total) {
931                 offset = pfn_level_offset(pfn, total);
932                 pte = &parent[offset];
933                 if (level == total)
934                         return pte;
935
936                 if (!dma_pte_present(pte)) {
937                         *large_page = total;
938                         break;
939                 }
940
941                 if (dma_pte_superpage(pte)) {
942                         *large_page = total;
943                         return pte;
944                 }
945
946                 parent = phys_to_virt(dma_pte_addr(pte));
947                 total--;
948         }
949         return NULL;
950 }
951
952 /* clear last-level ptes; a tlb flush should follow */
953 static void dma_pte_clear_range(struct dmar_domain *domain,
954                                 unsigned long start_pfn,
955                                 unsigned long last_pfn)
956 {
957         unsigned int large_page;
958         struct dma_pte *first_pte, *pte;
959
960         BUG_ON(!domain_pfn_supported(domain, start_pfn));
961         BUG_ON(!domain_pfn_supported(domain, last_pfn));
962         BUG_ON(start_pfn > last_pfn);
963
964         /* we don't need lock here; nobody else touches the iova range */
965         do {
966                 large_page = 1;
967                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
968                 if (!pte) {
969                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
970                         continue;
971                 }
972                 do {
973                         dma_clear_pte(pte);
974                         start_pfn += lvl_to_nr_pages(large_page);
975                         pte++;
976                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
977
978                 domain_flush_cache(domain, first_pte,
979                                    (void *)pte - (void *)first_pte);
980
981         } while (start_pfn && start_pfn <= last_pfn);
982 }
983
984 static void dma_pte_free_level(struct dmar_domain *domain, int level,
985                                int retain_level, struct dma_pte *pte,
986                                unsigned long pfn, unsigned long start_pfn,
987                                unsigned long last_pfn)
988 {
989         pfn = max(start_pfn, pfn);
990         pte = &pte[pfn_level_offset(pfn, level)];
991
992         do {
993                 unsigned long level_pfn;
994                 struct dma_pte *level_pte;
995
996                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
997                         goto next;
998
999                 level_pfn = pfn & level_mask(level);
1000                 level_pte = phys_to_virt(dma_pte_addr(pte));
1001
1002                 if (level > 2) {
1003                         dma_pte_free_level(domain, level - 1, retain_level,
1004                                            level_pte, level_pfn, start_pfn,
1005                                            last_pfn);
1006                 }
1007
1008                 /*
1009                  * Free the page table if we're below the level we want to
1010                  * retain and the range covers the entire table.
1011                  */
1012                 if (level < retain_level && !(start_pfn > level_pfn ||
1013                       last_pfn < level_pfn + level_size(level) - 1)) {
1014                         dma_clear_pte(pte);
1015                         domain_flush_cache(domain, pte, sizeof(*pte));
1016                         free_pgtable_page(level_pte);
1017                 }
1018 next:
1019                 pfn += level_size(level);
1020         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1021 }
1022
1023 /*
1024  * clear last level (leaf) ptes and free page table pages below the
1025  * level we wish to keep intact.
1026  */
1027 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1028                                    unsigned long start_pfn,
1029                                    unsigned long last_pfn,
1030                                    int retain_level)
1031 {
1032         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1033         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1034         BUG_ON(start_pfn > last_pfn);
1035
1036         dma_pte_clear_range(domain, start_pfn, last_pfn);
1037
1038         /* We don't need lock here; nobody else touches the iova range */
1039         dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1040                            domain->pgd, 0, start_pfn, last_pfn);
1041
1042         /* free pgd */
1043         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1044                 free_pgtable_page(domain->pgd);
1045                 domain->pgd = NULL;
1046         }
1047 }
1048
1049 /* When a page at a given level is being unlinked from its parent, we don't
1050    need to *modify* it at all. All we need to do is make a list of all the
1051    pages which can be freed just as soon as we've flushed the IOTLB and we
1052    know the hardware page-walk will no longer touch them.
1053    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1054    be freed. */
1055 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1056                                             int level, struct dma_pte *pte,
1057                                             struct page *freelist)
1058 {
1059         struct page *pg;
1060
1061         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1062         pg->freelist = freelist;
1063         freelist = pg;
1064
1065         if (level == 1)
1066                 return freelist;
1067
1068         pte = page_address(pg);
1069         do {
1070                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1071                         freelist = dma_pte_list_pagetables(domain, level - 1,
1072                                                            pte, freelist);
1073                 pte++;
1074         } while (!first_pte_in_page(pte));
1075
1076         return freelist;
1077 }
1078
1079 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1080                                         struct dma_pte *pte, unsigned long pfn,
1081                                         unsigned long start_pfn,
1082                                         unsigned long last_pfn,
1083                                         struct page *freelist)
1084 {
1085         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1086
1087         pfn = max(start_pfn, pfn);
1088         pte = &pte[pfn_level_offset(pfn, level)];
1089
1090         do {
1091                 unsigned long level_pfn;
1092
1093                 if (!dma_pte_present(pte))
1094                         goto next;
1095
1096                 level_pfn = pfn & level_mask(level);
1097
1098                 /* If range covers entire pagetable, free it */
1099                 if (start_pfn <= level_pfn &&
1100                     last_pfn >= level_pfn + level_size(level) - 1) {
1101                         /* These subordinate page tables are going away entirely. Don't
1102                            bother to clear them; we're just going to *free* them. */
1103                         if (level > 1 && !dma_pte_superpage(pte))
1104                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1105
1106                         dma_clear_pte(pte);
1107                         if (!first_pte)
1108                                 first_pte = pte;
1109                         last_pte = pte;
1110                 } else if (level > 1) {
1111                         /* Recurse down into a level that isn't *entirely* obsolete */
1112                         freelist = dma_pte_clear_level(domain, level - 1,
1113                                                        phys_to_virt(dma_pte_addr(pte)),
1114                                                        level_pfn, start_pfn, last_pfn,
1115                                                        freelist);
1116                 }
1117 next:
1118                 pfn += level_size(level);
1119         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1120
1121         if (first_pte)
1122                 domain_flush_cache(domain, first_pte,
1123                                    (void *)++last_pte - (void *)first_pte);
1124
1125         return freelist;
1126 }
1127
1128 /* We can't just free the pages because the IOMMU may still be walking
1129    the page tables, and may have cached the intermediate levels. The
1130    pages can only be freed after the IOTLB flush has been done. */
1131 static struct page *domain_unmap(struct dmar_domain *domain,
1132                                  unsigned long start_pfn,
1133                                  unsigned long last_pfn)
1134 {
1135         struct page *freelist;
1136
1137         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1138         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1139         BUG_ON(start_pfn > last_pfn);
1140
1141         /* we don't need lock here; nobody else touches the iova range */
1142         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1143                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1144
1145         /* free pgd */
1146         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1147                 struct page *pgd_page = virt_to_page(domain->pgd);
1148                 pgd_page->freelist = freelist;
1149                 freelist = pgd_page;
1150
1151                 domain->pgd = NULL;
1152         }
1153
1154         return freelist;
1155 }
1156
1157 static void dma_free_pagelist(struct page *freelist)
1158 {
1159         struct page *pg;
1160
1161         while ((pg = freelist)) {
1162                 freelist = pg->freelist;
1163                 free_pgtable_page(page_address(pg));
1164         }
1165 }
1166
1167 static void iova_entry_free(unsigned long data)
1168 {
1169         struct page *freelist = (struct page *)data;
1170
1171         dma_free_pagelist(freelist);
1172 }
1173
1174 /* iommu handling */
1175 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1176 {
1177         struct root_entry *root;
1178         unsigned long flags;
1179
1180         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1181         if (!root) {
1182                 pr_err("Allocating root entry for %s failed\n",
1183                         iommu->name);
1184                 return -ENOMEM;
1185         }
1186
1187         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1188
1189         spin_lock_irqsave(&iommu->lock, flags);
1190         iommu->root_entry = root;
1191         spin_unlock_irqrestore(&iommu->lock, flags);
1192
1193         return 0;
1194 }
1195
1196 static void iommu_set_root_entry(struct intel_iommu *iommu)
1197 {
1198         u64 addr;
1199         u32 sts;
1200         unsigned long flag;
1201
1202         addr = virt_to_phys(iommu->root_entry);
1203         if (sm_supported(iommu))
1204                 addr |= DMA_RTADDR_SMT;
1205
1206         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1207         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1208
1209         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1210
1211                 /* Make sure hardware completes it */
1212         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1213                       readl, (sts & DMA_GSTS_RTPS), sts);
1214
1215         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1216 }
1217
1218 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1219 {
1220         u32 val;
1221         unsigned long flag;
1222
1223         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1224                 return;
1225
1226         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1227         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1228
1229                 /* Make sure hardware completes it */
1230         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1231                       readl, (!(val & DMA_GSTS_WBFS)), val);
1232
1233         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1234 }
1235
1236 /* return value determines if we need a write buffer flush */
1237 static void __iommu_flush_context(struct intel_iommu *iommu,
1238                                   u16 did, u16 source_id, u8 function_mask,
1239                                   u64 type)
1240 {
1241         u64 val = 0;
1242         unsigned long flag;
1243
1244         switch (type) {
1245         case DMA_CCMD_GLOBAL_INVL:
1246                 val = DMA_CCMD_GLOBAL_INVL;
1247                 break;
1248         case DMA_CCMD_DOMAIN_INVL:
1249                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1250                 break;
1251         case DMA_CCMD_DEVICE_INVL:
1252                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1253                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1254                 break;
1255         default:
1256                 BUG();
1257         }
1258         val |= DMA_CCMD_ICC;
1259
1260         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1261         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1262
1263                 /* Make sure hardware completes it */
1264         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1265                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1266
1267         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1268 }
1269
1270 /* return value determines if we need a write buffer flush */
1271 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1272                                 u64 addr, unsigned int size_order, u64 type)
1273 {
1274         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1275         u64 val = 0, val_iva = 0;
1276         unsigned long flag;
1277
1278         switch (type) {
1279         case DMA_TLB_GLOBAL_FLUSH:
1280                 /* global flush doesn't need set IVA_REG */
1281                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1282                 break;
1283         case DMA_TLB_DSI_FLUSH:
1284                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1285                 break;
1286         case DMA_TLB_PSI_FLUSH:
1287                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1288                 /* IH bit is passed in as part of address */
1289                 val_iva = size_order | addr;
1290                 break;
1291         default:
1292                 BUG();
1293         }
1294         /* Note: set drain read/write */
1295 #if 0
1296         /*
1297          * This is probably here just to be extra safe. It looks like we can
1298          * ignore it without any impact.
1299          */
1300         if (cap_read_drain(iommu->cap))
1301                 val |= DMA_TLB_READ_DRAIN;
1302 #endif
1303         if (cap_write_drain(iommu->cap))
1304                 val |= DMA_TLB_WRITE_DRAIN;
1305
1306         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1307         /* Note: Only uses first TLB reg currently */
1308         if (val_iva)
1309                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1310         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1311
1312                 /* Make sure hardware completes it */
1313         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1314                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1315
1316         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1317
1318         /* check IOTLB invalidation granularity */
1319         if (DMA_TLB_IAIG(val) == 0)
1320                 pr_err("Flush IOTLB failed\n");
1321         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1322                 pr_debug("TLB flush request %Lx, actual %Lx\n",
1323                         (unsigned long long)DMA_TLB_IIRG(type),
1324                         (unsigned long long)DMA_TLB_IAIG(val));
1325 }
1326
1327 static struct device_domain_info *
1328 iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1329                          u8 bus, u8 devfn)
1330 {
1331         struct device_domain_info *info;
1332
1333         assert_spin_locked(&device_domain_lock);
1334
1335         if (!iommu->qi)
1336                 return NULL;
1337
1338         list_for_each_entry(info, &domain->devices, link)
1339                 if (info->iommu == iommu && info->bus == bus &&
1340                     info->devfn == devfn) {
1341                         if (info->ats_supported && info->dev)
1342                                 return info;
1343                         break;
1344                 }
1345
1346         return NULL;
1347 }
1348
1349 static void domain_update_iotlb(struct dmar_domain *domain)
1350 {
1351         struct device_domain_info *info;
1352         bool has_iotlb_device = false;
1353
1354         assert_spin_locked(&device_domain_lock);
1355
1356         list_for_each_entry(info, &domain->devices, link) {
1357                 struct pci_dev *pdev;
1358
1359                 if (!info->dev || !dev_is_pci(info->dev))
1360                         continue;
1361
1362                 pdev = to_pci_dev(info->dev);
1363                 if (pdev->ats_enabled) {
1364                         has_iotlb_device = true;
1365                         break;
1366                 }
1367         }
1368
1369         domain->has_iotlb_device = has_iotlb_device;
1370 }
1371
1372 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1373 {
1374         struct pci_dev *pdev;
1375
1376         assert_spin_locked(&device_domain_lock);
1377
1378         if (!info || !dev_is_pci(info->dev))
1379                 return;
1380
1381         pdev = to_pci_dev(info->dev);
1382         /* For IOMMU that supports device IOTLB throttling (DIT), we assign
1383          * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
1384          * queue depth at PF level. If DIT is not set, PFSID will be treated as
1385          * reserved, which should be set to 0.
1386          */
1387         if (!ecap_dit(info->iommu->ecap))
1388                 info->pfsid = 0;
1389         else {
1390                 struct pci_dev *pf_pdev;
1391
1392                 /* pdev will be returned if device is not a vf */
1393                 pf_pdev = pci_physfn(pdev);
1394                 info->pfsid = pci_dev_id(pf_pdev);
1395         }
1396
1397 #ifdef CONFIG_INTEL_IOMMU_SVM
1398         /* The PCIe spec, in its wisdom, declares that the behaviour of
1399            the device if you enable PASID support after ATS support is
1400            undefined. So always enable PASID support on devices which
1401            have it, even if we can't yet know if we're ever going to
1402            use it. */
1403         if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1404                 info->pasid_enabled = 1;
1405
1406         if (info->pri_supported &&
1407             (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1)  &&
1408             !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1409                 info->pri_enabled = 1;
1410 #endif
1411         if (!pdev->untrusted && info->ats_supported &&
1412             pci_ats_page_aligned(pdev) &&
1413             !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1414                 info->ats_enabled = 1;
1415                 domain_update_iotlb(info->domain);
1416                 info->ats_qdep = pci_ats_queue_depth(pdev);
1417         }
1418 }
1419
1420 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1421 {
1422         struct pci_dev *pdev;
1423
1424         assert_spin_locked(&device_domain_lock);
1425
1426         if (!dev_is_pci(info->dev))
1427                 return;
1428
1429         pdev = to_pci_dev(info->dev);
1430
1431         if (info->ats_enabled) {
1432                 pci_disable_ats(pdev);
1433                 info->ats_enabled = 0;
1434                 domain_update_iotlb(info->domain);
1435         }
1436 #ifdef CONFIG_INTEL_IOMMU_SVM
1437         if (info->pri_enabled) {
1438                 pci_disable_pri(pdev);
1439                 info->pri_enabled = 0;
1440         }
1441         if (info->pasid_enabled) {
1442                 pci_disable_pasid(pdev);
1443                 info->pasid_enabled = 0;
1444         }
1445 #endif
1446 }
1447
1448 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1449                                   u64 addr, unsigned mask)
1450 {
1451         u16 sid, qdep;
1452         unsigned long flags;
1453         struct device_domain_info *info;
1454
1455         if (!domain->has_iotlb_device)
1456                 return;
1457
1458         spin_lock_irqsave(&device_domain_lock, flags);
1459         list_for_each_entry(info, &domain->devices, link) {
1460                 if (!info->ats_enabled)
1461                         continue;
1462
1463                 sid = info->bus << 8 | info->devfn;
1464                 qdep = info->ats_qdep;
1465                 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1466                                 qdep, addr, mask);
1467         }
1468         spin_unlock_irqrestore(&device_domain_lock, flags);
1469 }
1470
1471 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1472                                   struct dmar_domain *domain,
1473                                   unsigned long pfn, unsigned int pages,
1474                                   int ih, int map)
1475 {
1476         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1477         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1478         u16 did = domain->iommu_did[iommu->seq_id];
1479
1480         BUG_ON(pages == 0);
1481
1482         if (ih)
1483                 ih = 1 << 6;
1484         /*
1485          * Fall back to a domain-selective flush if there is no PSI support or
1486          * the size is too big.
1487          * PSI requires the page size to be 2^x and the base address to be
1488          * naturally aligned to that size.
1489          */
1490         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1491                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1492                                                 DMA_TLB_DSI_FLUSH);
1493         else
1494                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1495                                                 DMA_TLB_PSI_FLUSH);
1496
1497         /*
1498          * In caching mode, changes of pages from non-present to present require
1499          * flush. However, device IOTLB doesn't need to be flushed in this case.
1500          */
1501         if (!cap_caching_mode(iommu->cap) || !map)
1502                 iommu_flush_dev_iotlb(domain, addr, mask);
1503 }
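
/*
 * Worked example (illustration only): flushing 300 pages gives
 * mask == ilog2(__roundup_pow_of_two(300)) == ilog2(512) == 9, i.e. a
 * 512-page (2MiB) invalidation; addr is the pfn shifted by VTD_PAGE_SHIFT
 * with the IH hint ORed in at bit 6 when requested. If PSI is unsupported
 * or mask exceeds cap_max_amask_val(), a domain-selective flush is used
 * instead.
 */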
1504
1505 /* Notification for newly created mappings */
1506 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1507                                         struct dmar_domain *domain,
1508                                         unsigned long pfn, unsigned int pages)
1509 {
1510         /* It's a non-present to present mapping. Only flush if caching mode */
1511         if (cap_caching_mode(iommu->cap))
1512                 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1513         else
1514                 iommu_flush_write_buffer(iommu);
1515 }
1516
1517 static void iommu_flush_iova(struct iova_domain *iovad)
1518 {
1519         struct dmar_domain *domain;
1520         int idx;
1521
1522         domain = container_of(iovad, struct dmar_domain, iovad);
1523
1524         for_each_domain_iommu(idx, domain) {
1525                 struct intel_iommu *iommu = g_iommus[idx];
1526                 u16 did = domain->iommu_did[iommu->seq_id];
1527
1528                 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1529
1530                 if (!cap_caching_mode(iommu->cap))
1531                         iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1532                                               0, MAX_AGAW_PFN_WIDTH);
1533         }
1534 }
1535
1536 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1537 {
1538         u32 pmen;
1539         unsigned long flags;
1540
1541         if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1542                 return;
1543
1544         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1545         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1546         pmen &= ~DMA_PMEN_EPM;
1547         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1548
1549         /* wait for the protected region status bit to clear */
1550         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1551                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1552
1553         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1554 }
1555
1556 static void iommu_enable_translation(struct intel_iommu *iommu)
1557 {
1558         u32 sts;
1559         unsigned long flags;
1560
1561         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1562         iommu->gcmd |= DMA_GCMD_TE;
1563         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1564
1565         /* Make sure the hardware completes it */
1566         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1567                       readl, (sts & DMA_GSTS_TES), sts);
1568
1569         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1570 }
1571
1572 static void iommu_disable_translation(struct intel_iommu *iommu)
1573 {
1574         u32 sts;
1575         unsigned long flag;
1576
1577         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1578         iommu->gcmd &= ~DMA_GCMD_TE;
1579         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1580
1581         /* Make sure the hardware completes it */
1582         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1583                       readl, (!(sts & DMA_GSTS_TES)), sts);
1584
1585         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1586 }
1587
1588
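/*
 * Allocate the per-IOMMU domain-id bitmap and the two-level array of
 * domain pointers, reserving domain-id 0 (and FLPT_DEFAULT_DID in
 * scalable mode) so they are never handed out to ordinary domains.
 */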
1589 static int iommu_init_domains(struct intel_iommu *iommu)
1590 {
1591         u32 ndomains, nlongs;
1592         size_t size;
1593
1594         ndomains = cap_ndoms(iommu->cap);
1595         pr_debug("%s: Number of Domains supported <%d>\n",
1596                  iommu->name, ndomains);
1597         nlongs = BITS_TO_LONGS(ndomains);
1598
1599         spin_lock_init(&iommu->lock);
1600
1601         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1602         if (!iommu->domain_ids) {
1603                 pr_err("%s: Allocating domain id array failed\n",
1604                        iommu->name);
1605                 return -ENOMEM;
1606         }
1607
1608         size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1609         iommu->domains = kzalloc(size, GFP_KERNEL);
1610
1611         if (iommu->domains) {
1612                 size = 256 * sizeof(struct dmar_domain *);
1613                 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1614         }
1615
1616         if (!iommu->domains || !iommu->domains[0]) {
1617                 pr_err("%s: Allocating domain array failed\n",
1618                        iommu->name);
1619                 kfree(iommu->domain_ids);
1620                 kfree(iommu->domains);
1621                 iommu->domain_ids = NULL;
1622                 iommu->domains    = NULL;
1623                 return -ENOMEM;
1624         }
1625
1626
1628         /*
1629          * If Caching mode is set, then invalid translations are tagged
1630          * with domain-id 0, hence we need to pre-allocate it. We also
1631          * use domain-id 0 as a marker for non-allocated domain-id, so
1632          * make sure it is not used for a real domain.
1633          */
1634         set_bit(0, iommu->domain_ids);
1635
1636         /*
1637          * The VT-d spec rev 3.0 (section 6.2.3.1) requires that each PASID
1638          * entry for first-level or pass-through translation modes should
1639          * be programmed with a domain id different from those used for
1640          * second-level or nested translation. We reserve a domain id for
1641          * this purpose.
1642          */
1643         if (sm_supported(iommu))
1644                 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
1645
1646         return 0;
1647 }
1648
1649 static void disable_dmar_iommu(struct intel_iommu *iommu)
1650 {
1651         struct device_domain_info *info, *tmp;
1652         unsigned long flags;
1653
1654         if (!iommu->domains || !iommu->domain_ids)
1655                 return;
1656
1657 again:
1658         spin_lock_irqsave(&device_domain_lock, flags);
1659         list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1660                 struct dmar_domain *domain;
1661
1662                 if (info->iommu != iommu)
1663                         continue;
1664
1665                 if (!info->dev || !info->domain)
1666                         continue;
1667
1668                 domain = info->domain;
1669
1670                 __dmar_remove_one_dev_info(info);
1671
1672                 if (!domain_type_is_vm_or_si(domain)) {
1673                         /*
1674                          * The domain_exit() function can't be called under
1675                          * device_domain_lock, as it takes this lock itself.
1676                          * So release the lock here and re-run the loop
1677                          * afterwards.
1678                          */
1679                         spin_unlock_irqrestore(&device_domain_lock, flags);
1680                         domain_exit(domain);
1681                         goto again;
1682                 }
1683         }
1684         spin_unlock_irqrestore(&device_domain_lock, flags);
1685
1686         if (iommu->gcmd & DMA_GCMD_TE)
1687                 iommu_disable_translation(iommu);
1688 }
1689
1690 static void free_dmar_iommu(struct intel_iommu *iommu)
1691 {
1692         if ((iommu->domains) && (iommu->domain_ids)) {
1693                 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1694                 int i;
1695
1696                 for (i = 0; i < elems; i++)
1697                         kfree(iommu->domains[i]);
1698                 kfree(iommu->domains);
1699                 kfree(iommu->domain_ids);
1700                 iommu->domains = NULL;
1701                 iommu->domain_ids = NULL;
1702         }
1703
1704         g_iommus[iommu->seq_id] = NULL;
1705
1706         /* free context mapping */
1707         free_context_table(iommu);
1708
1709 #ifdef CONFIG_INTEL_IOMMU_SVM
1710         if (pasid_supported(iommu)) {
1711                 if (ecap_prs(iommu->ecap))
1712                         intel_svm_finish_prq(iommu);
1713         }
1714 #endif
1715 }
1716
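/* Allocate a dmar_domain, zero it and initialize its IOMMU-independent fields. */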
1717 static struct dmar_domain *alloc_domain(int flags)
1718 {
1719         struct dmar_domain *domain;
1720
1721         domain = alloc_domain_mem();
1722         if (!domain)
1723                 return NULL;
1724
1725         memset(domain, 0, sizeof(*domain));
1726         domain->nid = NUMA_NO_NODE;
1727         domain->flags = flags;
1728         domain->has_iotlb_device = false;
1729         INIT_LIST_HEAD(&domain->devices);
1730
1731         return domain;
1732 }
1733
1734 /* Must be called with device_domain_lock and iommu->lock held */
1735 static int domain_attach_iommu(struct dmar_domain *domain,
1736                                struct intel_iommu *iommu)
1737 {
1738         unsigned long ndomains;
1739         int num;
1740
1741         assert_spin_locked(&device_domain_lock);
1742         assert_spin_locked(&iommu->lock);
1743
1744         domain->iommu_refcnt[iommu->seq_id] += 1;
1745         domain->iommu_count += 1;
1746         if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1747                 ndomains = cap_ndoms(iommu->cap);
1748                 num      = find_first_zero_bit(iommu->domain_ids, ndomains);
1749
1750                 if (num >= ndomains) {
1751                         pr_err("%s: No free domain ids\n", iommu->name);
1752                         domain->iommu_refcnt[iommu->seq_id] -= 1;
1753                         domain->iommu_count -= 1;
1754                         return -ENOSPC;
1755                 }
1756
1757                 set_bit(num, iommu->domain_ids);
1758                 set_iommu_domain(iommu, num, domain);
1759
1760                 domain->iommu_did[iommu->seq_id] = num;
1761                 domain->nid                      = iommu->node;
1762
1763                 domain_update_iommu_cap(domain);
1764         }
1765
1766         return 0;
1767 }
1768
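/* Must be called with device_domain_lock and iommu->lock held */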
1769 static int domain_detach_iommu(struct dmar_domain *domain,
1770                                struct intel_iommu *iommu)
1771 {
1772         int num, count;
1773
1774         assert_spin_locked(&device_domain_lock);
1775         assert_spin_locked(&iommu->lock);
1776
1777         domain->iommu_refcnt[iommu->seq_id] -= 1;
1778         count = --domain->iommu_count;
1779         if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1780                 num = domain->iommu_did[iommu->seq_id];
1781                 clear_bit(num, iommu->domain_ids);
1782                 set_iommu_domain(iommu, num, NULL);
1783
1784                 domain_update_iommu_cap(domain);
1785                 domain->iommu_did[iommu->seq_id] = 0;
1786         }
1787
1788         return count;
1789 }
1790
1791 static struct iova_domain reserved_iova_list;
1792 static struct lock_class_key reserved_rbtree_key;
1793
1794 static int dmar_init_reserved_ranges(void)
1795 {
1796         struct pci_dev *pdev = NULL;
1797         struct iova *iova;
1798         int i;
1799
1800         init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
1801
1802         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1803                 &reserved_rbtree_key);
1804
1805         /* IOAPIC ranges shouldn't be accessed by DMA */
1806         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1807                 IOVA_PFN(IOAPIC_RANGE_END));
1808         if (!iova) {
1809                 pr_err("Reserve IOAPIC range failed\n");
1810                 return -ENODEV;
1811         }
1812
1813         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1814         for_each_pci_dev(pdev) {
1815                 struct resource *r;
1816
1817                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1818                         r = &pdev->resource[i];
1819                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1820                                 continue;
1821                         iova = reserve_iova(&reserved_iova_list,
1822                                             IOVA_PFN(r->start),
1823                                             IOVA_PFN(r->end));
1824                         if (!iova) {
1825                                 pci_err(pdev, "Reserve iova for %pR failed\n", r);
1826                                 return -ENODEV;
1827                         }
1828                 }
1829         }
1830         return 0;
1831 }
1832
1833 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1834 {
1835         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1836 }
1837
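/*
 * Round a guest address width up to the next adjusted guest address width,
 * i.e. 12 bits of page offset plus a whole number of 9-bit page-table
 * strides, capped at 64 bits.
 */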
1838 static inline int guestwidth_to_adjustwidth(int gaw)
1839 {
1840         int agaw;
1841         int r = (gaw - 12) % 9;
1842
1843         if (r == 0)
1844                 agaw = gaw;
1845         else
1846                 agaw = gaw + 9 - r;
1847         if (agaw > 64)
1848                 agaw = 64;
1849         return agaw;
1850 }
1851
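/*
 * Initialize @domain for use behind @iommu: set up its IOVA allocator and
 * flush queue, reserve the special ranges, derive the adjusted guest
 * address width and capability bits, and allocate the top-level page table.
 */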
1852 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1853                        int guest_width)
1854 {
1855         int adjust_width, agaw;
1856         unsigned long sagaw;
1857         int err;
1858
1859         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
1860
1861         err = init_iova_flush_queue(&domain->iovad,
1862                                     iommu_flush_iova, iova_entry_free);
1863         if (err)
1864                 return err;
1865
1866         domain_reserve_special_ranges(domain);
1867
1868         /* calculate AGAW */
1869         if (guest_width > cap_mgaw(iommu->cap))
1870                 guest_width = cap_mgaw(iommu->cap);
1871         domain->gaw = guest_width;
1872         adjust_width = guestwidth_to_adjustwidth(guest_width);
1873         agaw = width_to_agaw(adjust_width);
1874         sagaw = cap_sagaw(iommu->cap);
1875         if (!test_bit(agaw, &sagaw)) {
1876                 /* hardware doesn't support it, choose a bigger one */
1877                 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1878                 agaw = find_next_bit(&sagaw, 5, agaw);
1879                 if (agaw >= 5)
1880                         return -ENODEV;
1881         }
1882         domain->agaw = agaw;
1883
1884         if (ecap_coherent(iommu->ecap))
1885                 domain->iommu_coherency = 1;
1886         else
1887                 domain->iommu_coherency = 0;
1888
1889         if (ecap_sc_support(iommu->ecap))
1890                 domain->iommu_snooping = 1;
1891         else
1892                 domain->iommu_snooping = 0;
1893
1894         if (intel_iommu_superpage)
1895                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1896         else
1897                 domain->iommu_superpage = 0;
1898
1899         domain->nid = iommu->node;
1900
1901         /* always allocate the top pgd */
1902         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1903         if (!domain->pgd)
1904                 return -ENOMEM;
1905         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1906         return 0;
1907 }
1908
1909 static void domain_exit(struct dmar_domain *domain)
1910 {
1911         struct page *freelist;
1912
1913         /* Remove associated devices and clear attached or cached domains */
1914         rcu_read_lock();
1915         domain_remove_dev_info(domain);
1916         rcu_read_unlock();
1917
1918         /* destroy iovas */
1919         put_iova_domain(&domain->iovad);
1920
1921         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1922
1923         dma_free_pagelist(freelist);
1924
1925         free_domain_mem(domain);
1926 }
1927
1928 /*
1929  * Get the PASID directory size for a scalable mode context entry.
1930  * A value of X in the PDTS field of a scalable mode context entry
1931  * indicates a PASID directory with 2^(X + 7) entries.
1932  */
1933 static inline unsigned long context_get_sm_pds(struct pasid_table *table)
1934 {
1935         int pds, max_pde;
1936
1937         max_pde = table->max_pasid >> PASID_PDE_SHIFT;
1938         pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
1939         if (pds < 7)
1940                 return 0;
1941
1942         return pds - 7;
1943 }
1944
1945 /*
1946  * Set the RID_PASID field of a scalable mode context entry. The
1947  * IOMMU hardware will use the PASID value set in this field when
1948  * translating DMA requests that carry no PASID.
1949  */
1950 static inline void
1951 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
1952 {
1953         context->hi |= pasid & ((1 << 20) - 1);
1954         context->hi |= (1 << 20);
1955 }
1956
1957 /*
1958  * Set the DTE (Device-TLB Enable) field of a scalable mode context
1959  * entry.
1960  */
1961 static inline void context_set_sm_dte(struct context_entry *context)
1962 {
1963         context->lo |= (1 << 2);
1964 }
1965
1966 /*
1967  * Set the PRE (Page Request Enable) field of a scalable mode context
1968  * entry.
1969  */
1970 static inline void context_set_sm_pre(struct context_entry *context)
1971 {
1972         context->lo |= (1 << 4);
1973 }
1974
1975 /* Convert value to context PASID directory size field coding. */
1976 #define context_pdts(pds)       (((pds) & 0x7) << 9)
1977
1978 static int domain_context_mapping_one(struct dmar_domain *domain,
1979                                       struct intel_iommu *iommu,
1980                                       struct pasid_table *table,
1981                                       u8 bus, u8 devfn)
1982 {
1983         u16 did = domain->iommu_did[iommu->seq_id];
1984         int translation = CONTEXT_TT_MULTI_LEVEL;
1985         struct device_domain_info *info = NULL;
1986         struct context_entry *context;
1987         unsigned long flags;
1988         int ret;
1989
1990         WARN_ON(did == 0);
1991
1992         if (hw_pass_through && domain_type_is_si(domain))
1993                 translation = CONTEXT_TT_PASS_THROUGH;
1994
1995         pr_debug("Set context mapping for %02x:%02x.%d\n",
1996                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1997
1998         BUG_ON(!domain->pgd);
1999
2000         spin_lock_irqsave(&device_domain_lock, flags);
2001         spin_lock(&iommu->lock);
2002
2003         ret = -ENOMEM;
2004         context = iommu_context_addr(iommu, bus, devfn, 1);
2005         if (!context)
2006                 goto out_unlock;
2007
2008         ret = 0;
2009         if (context_present(context))
2010                 goto out_unlock;
2011
2012         /*
2013          * For kdump cases, old valid entries may be cached due to the
2014          * in-flight DMA and copied pgtable, but there is no unmapping
2015          * behaviour for them, thus we need an explicit cache flush for
2016          * the newly-mapped device. For kdump, at this point, the device
2017          * is supposed to have finished reset at its driver probe stage,
2018          * so no in-flight DMA will exist, and we don't need to worry
2019          * about it hereafter.
2020          */
2021         if (context_copied(context)) {
2022                 u16 did_old = context_domain_id(context);
2023
2024                 if (did_old < cap_ndoms(iommu->cap)) {
2025                         iommu->flush.flush_context(iommu, did_old,
2026                                                    (((u16)bus) << 8) | devfn,
2027                                                    DMA_CCMD_MASK_NOBIT,
2028                                                    DMA_CCMD_DEVICE_INVL);
2029                         iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2030                                                  DMA_TLB_DSI_FLUSH);
2031                 }
2032         }
2033
2034         context_clear_entry(context);
2035
2036         if (sm_supported(iommu)) {
2037                 unsigned long pds;
2038
2039                 WARN_ON(!table);
2040
2041                 /* Setup the PASID DIR pointer: */
2042                 pds = context_get_sm_pds(table);
2043                 context->lo = (u64)virt_to_phys(table->table) |
2044                                 context_pdts(pds);
2045
2046                 /* Setup the RID_PASID field: */
2047                 context_set_sm_rid2pasid(context, PASID_RID2PASID);
2048
2049                 /*
2050                  * Setup the Device-TLB enable bit and Page request
2051                  * Enable bit:
2052                  */
2053                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2054                 if (info && info->ats_supported)
2055                         context_set_sm_dte(context);
2056                 if (info && info->pri_supported)
2057                         context_set_sm_pre(context);
2058         } else {
2059                 struct dma_pte *pgd = domain->pgd;
2060                 int agaw;
2061
2062                 context_set_domain_id(context, did);
2063
2064                 if (translation != CONTEXT_TT_PASS_THROUGH) {
2065                         /*
2066                          * Skip top levels of the page tables for an IOMMU whose
2067                          * agaw is less than the default. Unnecessary for PT mode.
2068                          */
2069                         for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2070                                 ret = -ENOMEM;
2071                                 pgd = phys_to_virt(dma_pte_addr(pgd));
2072                                 if (!dma_pte_present(pgd))
2073                                         goto out_unlock;
2074                         }
2075
2076                         info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2077                         if (info && info->ats_supported)
2078                                 translation = CONTEXT_TT_DEV_IOTLB;
2079                         else
2080                                 translation = CONTEXT_TT_MULTI_LEVEL;
2081
2082                         context_set_address_root(context, virt_to_phys(pgd));
2083                         context_set_address_width(context, agaw);
2084                 } else {
2085                         /*
2086                          * In pass-through mode, AW must be programmed to
2087                          * indicate the largest AGAW value supported by the
2088                          * hardware, and ASR is ignored by the hardware.
2089                          */
2090                         context_set_address_width(context, iommu->msagaw);
2091                 }
2092
2093                 context_set_translation_type(context, translation);
2094         }
2095
2096         context_set_fault_enable(context);
2097         context_set_present(context);
2098         domain_flush_cache(domain, context, sizeof(*context));
2099
2100         /*
2101          * It's a non-present to present mapping. If the hardware doesn't cache
2102          * non-present entries we only need to flush the write-buffer. If it
2103          * _does_ cache non-present entries, then it does so in the special
2104          * domain #0, which we have to flush:
2105          */
2106         if (cap_caching_mode(iommu->cap)) {
2107                 iommu->flush.flush_context(iommu, 0,
2108                                            (((u16)bus) << 8) | devfn,
2109                                            DMA_CCMD_MASK_NOBIT,
2110                                            DMA_CCMD_DEVICE_INVL);
2111                 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2112         } else {
2113                 iommu_flush_write_buffer(iommu);
2114         }
2115         iommu_enable_dev_iotlb(info);
2116
2117         ret = 0;
2118
2119 out_unlock:
2120         spin_unlock(&iommu->lock);
2121         spin_unlock_irqrestore(&device_domain_lock, flags);
2122
2123         return ret;
2124 }
2125
2126 struct domain_context_mapping_data {
2127         struct dmar_domain *domain;
2128         struct intel_iommu *iommu;
2129         struct pasid_table *table;
2130 };
2131
2132 static int domain_context_mapping_cb(struct pci_dev *pdev,
2133                                      u16 alias, void *opaque)
2134 {
2135         struct domain_context_mapping_data *data = opaque;
2136
2137         return domain_context_mapping_one(data->domain, data->iommu,
2138                                           data->table, PCI_BUS_NUM(alias),
2139                                           alias & 0xff);
2140 }
2141
2142 static int
2143 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2144 {
2145         struct domain_context_mapping_data data;
2146         struct pasid_table *table;
2147         struct intel_iommu *iommu;
2148         u8 bus, devfn;
2149
2150         iommu = device_to_iommu(dev, &bus, &devfn);
2151         if (!iommu)
2152                 return -ENODEV;
2153
2154         table = intel_pasid_get_table(dev);
2155
2156         if (!dev_is_pci(dev))
2157                 return domain_context_mapping_one(domain, iommu, table,
2158                                                   bus, devfn);
2159
2160         data.domain = domain;
2161         data.iommu = iommu;
2162         data.table = table;
2163
2164         return pci_for_each_dma_alias(to_pci_dev(dev),
2165                                       &domain_context_mapping_cb, &data);
2166 }
2167
2168 static int domain_context_mapped_cb(struct pci_dev *pdev,
2169                                     u16 alias, void *opaque)
2170 {
2171         struct intel_iommu *iommu = opaque;
2172
2173         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2174 }
2175
2176 static int domain_context_mapped(struct device *dev)
2177 {
2178         struct intel_iommu *iommu;
2179         u8 bus, devfn;
2180
2181         iommu = device_to_iommu(dev, &bus, &devfn);
2182         if (!iommu)
2183                 return -ENODEV;
2184
2185         if (!dev_is_pci(dev))
2186                 return device_context_mapped(iommu, bus, devfn);
2187
2188         return !pci_for_each_dma_alias(to_pci_dev(dev),
2189                                        domain_context_mapped_cb, iommu);
2190 }
2191
2192 /* Returns the number of VT-d pages, aligned to the MM page size */
2193 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2194                                             size_t size)
2195 {
2196         host_addr &= ~PAGE_MASK;
2197         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2198 }
2199
2200 /* Return largest possible superpage level for a given mapping */
2201 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2202                                           unsigned long iov_pfn,
2203                                           unsigned long phy_pfn,
2204                                           unsigned long pages)
2205 {
2206         int support, level = 1;
2207         unsigned long pfnmerge;
2208
2209         support = domain->iommu_superpage;
2210
2211         /* To use a large page, the virtual *and* physical addresses
2212            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2213            of them will mean we have to use smaller pages. So just
2214            merge them and check both at once. */
2215         pfnmerge = iov_pfn | phy_pfn;
2216
2217         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2218                 pages >>= VTD_STRIDE_SHIFT;
2219                 if (!pages)
2220                         break;
2221                 pfnmerge >>= VTD_STRIDE_SHIFT;
2222                 level++;
2223                 support--;
2224         }
2225         return level;
2226 }
2227
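/*
 * Fill in the page tables of @domain for @nr_pages pages starting at
 * @iov_pfn, taking the physical addresses from @sg if provided and from
 * @phys_pfn otherwise, and using superpages whenever alignment, size and
 * hardware support allow.
 */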
2228 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2229                             struct scatterlist *sg, unsigned long phys_pfn,
2230                             unsigned long nr_pages, int prot)
2231 {
2232         struct dma_pte *first_pte = NULL, *pte = NULL;
2233         phys_addr_t uninitialized_var(pteval);
2234         unsigned long sg_res = 0;
2235         unsigned int largepage_lvl = 0;
2236         unsigned long lvl_pages = 0;
2237
2238         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2239
2240         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2241                 return -EINVAL;
2242
2243         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2244
2245         if (!sg) {
2246                 sg_res = nr_pages;
2247                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2248         }
2249
2250         while (nr_pages > 0) {
2251                 uint64_t tmp;
2252
2253                 if (!sg_res) {
2254                         unsigned int pgoff = sg->offset & ~PAGE_MASK;
2255
2256                         sg_res = aligned_nrpages(sg->offset, sg->length);
2257                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
2258                         sg->dma_length = sg->length;
2259                         pteval = (sg_phys(sg) - pgoff) | prot;
2260                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
2261                 }
2262
2263                 if (!pte) {
2264                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2265
2266                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2267                         if (!pte)
2268                                 return -ENOMEM;
2269                         /* It is a large page */
2270                         if (largepage_lvl > 1) {
2271                                 unsigned long nr_superpages, end_pfn;
2272
2273                                 pteval |= DMA_PTE_LARGE_PAGE;
2274                                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2275
2276                                 nr_superpages = sg_res / lvl_pages;
2277                                 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2278
2279                                 /*
2280                                  * Ensure that old small page tables are
2281                                  * removed to make room for superpage(s).
2282                                  * We're adding new large pages, so make sure
2283                                  * we don't remove their parent tables.
2284                                  */
2285                                 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2286                                                        largepage_lvl + 1);
2287                         } else {
2288                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2289                         }
2290
2291                 }
2292                 /* We don't need a lock here; nobody else
2293                  * touches the iova range.
2294                  */
2295                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2296                 if (tmp) {
2297                         static int dumps = 5;
2298                         pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2299                                 iov_pfn, tmp, (unsigned long long)pteval);
2300                         if (dumps) {
2301                                 dumps--;
2302                                 debug_dma_dump_mappings(NULL);
2303                         }
2304                         WARN_ON(1);
2305                 }
2306
2307                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2308
2309                 BUG_ON(nr_pages < lvl_pages);
2310                 BUG_ON(sg_res < lvl_pages);
2311
2312                 nr_pages -= lvl_pages;
2313                 iov_pfn += lvl_pages;
2314                 phys_pfn += lvl_pages;
2315                 pteval += lvl_pages * VTD_PAGE_SIZE;
2316                 sg_res -= lvl_pages;
2317
2318                 /* If the next PTE would be the first in a new page, then we
2319                    need to flush the cache on the entries we've just written.
2320                    And then we'll need to recalculate 'pte', so clear it and
2321                    let it get set again in the if (!pte) block above.
2322
2323                    If we're done (!nr_pages) we need to flush the cache too.
2324
2325                    Also if we've been setting superpages, we may need to
2326                    recalculate 'pte' and switch back to smaller pages for the
2327                    end of the mapping, if the trailing size is not enough to
2328                    use another superpage (i.e. sg_res < lvl_pages). */
2329                 pte++;
2330                 if (!nr_pages || first_pte_in_page(pte) ||
2331                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2332                         domain_flush_cache(domain, first_pte,
2333                                            (void *)pte - (void *)first_pte);
2334                         pte = NULL;
2335                 }
2336
2337                 if (!sg_res && nr_pages)
2338                         sg = sg_next(sg);
2339         }
2340         return 0;
2341 }
2342
2343 static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2344                           struct scatterlist *sg, unsigned long phys_pfn,
2345                           unsigned long nr_pages, int prot)
2346 {
2347         int ret;
2348         struct intel_iommu *iommu;
2349
2350         /* Do the real mapping first */
2351         ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
2352         if (ret)
2353                 return ret;
2354
2355         /* Notify about the new mapping */
2356         if (domain_type_is_vm(domain)) {
2357                 /* VM typed domains can have more than one IOMMU */
2358                 int iommu_id;
2359
2360                 for_each_domain_iommu(iommu_id, domain) {
2361                         iommu = g_iommus[iommu_id];
2362                         __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2363                 }
2364         } else {
2365                 /* General domains only have one IOMMU */
2366                 iommu = domain_get_iommu(domain);
2367                 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2368         }
2369
2370         return 0;
2371 }
2372
2373 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2374                                     struct scatterlist *sg, unsigned long nr_pages,
2375                                     int prot)
2376 {
2377         return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2378 }
2379
2380 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2381                                      unsigned long phys_pfn, unsigned long nr_pages,
2382                                      int prot)
2383 {
2384         return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2385 }
2386
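/*
 * Clear the context entry for bus/devfn on @iommu and invalidate the
 * context-cache and IOTLB entries tagged with its old domain-id.
 */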
2387 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2388 {
2389         unsigned long flags;
2390         struct context_entry *context;
2391         u16 did_old;
2392
2393         if (!iommu)
2394                 return;
2395
2396         spin_lock_irqsave(&iommu->lock, flags);
2397         context = iommu_context_addr(iommu, bus, devfn, 0);
2398         if (!context) {
2399                 spin_unlock_irqrestore(&iommu->lock, flags);
2400                 return;
2401         }
2402         did_old = context_domain_id(context);
2403         context_clear_entry(context);
2404         __iommu_flush_cache(iommu, context, sizeof(*context));
2405         spin_unlock_irqrestore(&iommu->lock, flags);
2406         iommu->flush.flush_context(iommu,
2407                                    did_old,
2408                                    (((u16)bus) << 8) | devfn,
2409                                    DMA_CCMD_MASK_NOBIT,
2410                                    DMA_CCMD_DEVICE_INVL);
2411         iommu->flush.flush_iotlb(iommu,
2412                                  did_old,
2413                                  0,
2414                                  0,
2415                                  DMA_TLB_DSI_FLUSH);
2416 }
2417
2418 static inline void unlink_domain_info(struct device_domain_info *info)
2419 {
2420         assert_spin_locked(&device_domain_lock);
2421         list_del(&info->link);
2422         list_del(&info->global);
2423         if (info->dev)
2424                 info->dev->archdata.iommu = NULL;
2425 }
2426
2427 static void domain_remove_dev_info(struct dmar_domain *domain)
2428 {
2429         struct device_domain_info *info, *tmp;
2430         unsigned long flags;
2431
2432         spin_lock_irqsave(&device_domain_lock, flags);
2433         list_for_each_entry_safe(info, tmp, &domain->devices, link)
2434                 __dmar_remove_one_dev_info(info);
2435         spin_unlock_irqrestore(&device_domain_lock, flags);
2436 }
2437
2438 /*
2439  * find_domain
2440  * Note: we use struct device->archdata.iommu to store the info
2441  */
2442 static struct dmar_domain *find_domain(struct device *dev)
2443 {
2444         struct device_domain_info *info;
2445
2446         /* No lock here, assumes no domain exit in normal case */
2447         info = dev->archdata.iommu;
2448         if (likely(info))
2449                 return info->domain;
2450         return NULL;
2451 }
2452
2453 static inline struct device_domain_info *
2454 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2455 {
2456         struct device_domain_info *info;
2457
2458         list_for_each_entry(info, &device_domain_list, global)
2459                 if (info->iommu->segment == segment && info->bus == bus &&
2460                     info->devfn == devfn)
2461                         return info;
2462
2463         return NULL;
2464 }
2465
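/*
 * Bind bus/devfn (and @dev, if any) to @domain: allocate the
 * device_domain_info, attach the domain to @iommu, set up the PASID
 * table and RID2PASID entry in scalable mode, and program the context
 * entry. Returns the domain actually in use, which may be one that
 * already existed for the device, or NULL on failure.
 */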
2466 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2467                                                     int bus, int devfn,
2468                                                     struct device *dev,
2469                                                     struct dmar_domain *domain)
2470 {
2471         struct dmar_domain *found = NULL;
2472         struct device_domain_info *info;
2473         unsigned long flags;
2474         int ret;
2475
2476         info = alloc_devinfo_mem();
2477         if (!info)
2478                 return NULL;
2479
2480         info->bus = bus;
2481         info->devfn = devfn;
2482         info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2483         info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2484         info->ats_qdep = 0;
2485         info->dev = dev;
2486         info->domain = domain;
2487         info->iommu = iommu;
2488         info->pasid_table = NULL;
2489         info->auxd_enabled = 0;
2490         INIT_LIST_HEAD(&info->auxiliary_domains);
2491
2492         if (dev && dev_is_pci(dev)) {
2493                 struct pci_dev *pdev = to_pci_dev(info->dev);
2494
2495                 if (!pdev->untrusted &&
2496                     !pci_ats_disabled() &&
2497                     ecap_dev_iotlb_support(iommu->ecap) &&
2498                     pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2499                     dmar_find_matched_atsr_unit(pdev))
2500                         info->ats_supported = 1;
2501
2502                 if (sm_supported(iommu)) {
2503                         if (pasid_supported(iommu)) {
2504                                 int features = pci_pasid_features(pdev);
2505                                 if (features >= 0)
2506                                         info->pasid_supported = features | 1;
2507                         }
2508
2509                         if (info->ats_supported && ecap_prs(iommu->ecap) &&
2510                             pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2511                                 info->pri_supported = 1;
2512                 }
2513         }
2514
2515         spin_lock_irqsave(&device_domain_lock, flags);
2516         if (dev)
2517                 found = find_domain(dev);
2518
2519         if (!found) {
2520                 struct device_domain_info *info2;
2521                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2522                 if (info2) {
2523                         found      = info2->domain;
2524                         info2->dev = dev;
2525                 }
2526         }
2527
2528         if (found) {
2529                 spin_unlock_irqrestore(&device_domain_lock, flags);
2530                 free_devinfo_mem(info);
2531                 /* Caller must free the original domain */
2532                 return found;
2533         }
2534
2535         spin_lock(&iommu->lock);
2536         ret = domain_attach_iommu(domain, iommu);
2537         spin_unlock(&iommu->lock);
2538
2539         if (ret) {
2540                 spin_unlock_irqrestore(&device_domain_lock, flags);
2541                 free_devinfo_mem(info);
2542                 return NULL;
2543         }
2544
2545         list_add(&info->link, &domain->devices);
2546         list_add(&info->global, &device_domain_list);
2547         if (dev)
2548                 dev->archdata.iommu = info;
2549         spin_unlock_irqrestore(&device_domain_lock, flags);
2550
2551         /* PASID table is mandatory for a PCI device in scalable mode. */
2552         if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2553                 ret = intel_pasid_alloc_table(dev);
2554                 if (ret) {
2555                         dev_err(dev, "PASID table allocation failed\n");
2556                         dmar_remove_one_dev_info(dev);
2557                         return NULL;
2558                 }
2559
2560                 /* Setup the PASID entry for requests without PASID: */
2561                 spin_lock(&iommu->lock);
2562                 if (hw_pass_through && domain_type_is_si(domain))
2563                         ret = intel_pasid_setup_pass_through(iommu, domain,
2564                                         dev, PASID_RID2PASID);
2565                 else
2566                         ret = intel_pasid_setup_second_level(iommu, domain,
2567                                         dev, PASID_RID2PASID);
2568                 spin_unlock(&iommu->lock);
2569                 if (ret) {
2570                         dev_err(dev, "Setup RID2PASID failed\n");
2571                         dmar_remove_one_dev_info(dev);
2572                         return NULL;
2573                 }
2574         }
2575
2576         if (dev && domain_context_mapping(domain, dev)) {
2577                 dev_err(dev, "Domain context map failed\n");
2578                 dmar_remove_one_dev_info(dev);
2579                 return NULL;
2580         }
2581
2582         return domain;
2583 }
2584
2585 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2586 {
2587         *(u16 *)opaque = alias;
2588         return 0;
2589 }
2590
2591 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2592 {
2593         struct device_domain_info *info;
2594         struct dmar_domain *domain = NULL;
2595         struct intel_iommu *iommu;
2596         u16 dma_alias;
2597         unsigned long flags;
2598         u8 bus, devfn;
2599
2600         iommu = device_to_iommu(dev, &bus, &devfn);
2601         if (!iommu)
2602                 return NULL;
2603
2604         if (dev_is_pci(dev)) {
2605                 struct pci_dev *pdev = to_pci_dev(dev);
2606
2607                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2608
2609                 spin_lock_irqsave(&device_domain_lock, flags);
2610                 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2611                                                       PCI_BUS_NUM(dma_alias),
2612                                                       dma_alias & 0xff);
2613                 if (info) {
2614                         iommu = info->iommu;
2615                         domain = info->domain;
2616                 }
2617                 spin_unlock_irqrestore(&device_domain_lock, flags);
2618
2619                 /* DMA alias already has a domain, use it */
2620                 if (info)
2621                         goto out;
2622         }
2623
2624         /* Allocate and initialize new domain for the device */
2625         domain = alloc_domain(0);
2626         if (!domain)
2627                 return NULL;
2628         if (domain_init(domain, iommu, gaw)) {
2629                 domain_exit(domain);
2630                 return NULL;
2631         }
2632
2633 out:
2634
2635         return domain;
2636 }
2637
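/*
 * Attach @dev to @domain; for PCI devices whose DMA alias differs from
 * their own requester-id, the alias is registered as well. Returns the
 * domain actually in use.
 */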
2638 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2639                                               struct dmar_domain *domain)
2640 {
2641         struct intel_iommu *iommu;
2642         struct dmar_domain *tmp;
2643         u16 req_id, dma_alias;
2644         u8 bus, devfn;
2645
2646         iommu = device_to_iommu(dev, &bus, &devfn);
2647         if (!iommu)
2648                 return NULL;
2649
2650         req_id = ((u16)bus << 8) | devfn;
2651
2652         if (dev_is_pci(dev)) {
2653                 struct pci_dev *pdev = to_pci_dev(dev);
2654
2655                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2656
2657                 /* register PCI DMA alias device */
2658                 if (req_id != dma_alias) {
2659                         tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2660                                         dma_alias & 0xff, NULL, domain);
2661
2662                         if (!tmp || tmp != domain)
2663                                 return tmp;
2664                 }
2665         }
2666
2667         tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2668         if (!tmp || tmp != domain)
2669                 return tmp;
2670
2671         return domain;
2672 }
2673
2674 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2675 {
2676         struct dmar_domain *domain, *tmp;
2677
2678         domain = find_domain(dev);
2679         if (domain)
2680                 goto out;
2681
2682         domain = find_or_alloc_domain(dev, gaw);
2683         if (!domain)
2684                 goto out;
2685
2686         tmp = set_domain_for_dev(dev, domain);
2687         if (!tmp || domain != tmp) {
2688                 domain_exit(domain);
2689                 domain = tmp;
2690         }
2691
2692 out:
2693
2694         return domain;
2695 }
2696
2697 static int iommu_domain_identity_map(struct dmar_domain *domain,
2698                                      unsigned long long start,
2699                                      unsigned long long end)
2700 {
2701         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2702         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2703
2704         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2705                           dma_to_mm_pfn(last_vpfn))) {
2706                 pr_err("Reserving iova failed\n");
2707                 return -ENOMEM;
2708         }
2709
2710         pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2711         /*
2712          * RMRR range might have overlap with physical memory range,
2713          * clear it first
2714          */
2715         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2716
2717         return __domain_mapping(domain, first_vpfn, NULL,
2718                                 first_vpfn, last_vpfn - first_vpfn + 1,
2719                                 DMA_PTE_READ|DMA_PTE_WRITE);
2720 }
2721
2722 static int domain_prepare_identity_map(struct device *dev,
2723                                        struct dmar_domain *domain,
2724                                        unsigned long long start,
2725                                        unsigned long long end)
2726 {
2727         /* For _hardware_ passthrough, don't bother. But for software
2728            passthrough, we do it anyway -- it may indicate a memory
2729            range which is reserved in E820 and so didn't get set up
2730            in si_domain to start with */
2731         if (domain == si_domain && hw_pass_through) {
2732                 dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
2733                          start, end);
2734                 return 0;
2735         }
2736
2737         dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
2738
2739         if (end < start) {
2740                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2741                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2742                         dmi_get_system_info(DMI_BIOS_VENDOR),
2743                         dmi_get_system_info(DMI_BIOS_VERSION),
2744                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2745                 return -EIO;
2746         }
2747
2748         if (end >> agaw_to_width(domain->agaw)) {
2749                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2750                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2751                      agaw_to_width(domain->agaw),
2752                      dmi_get_system_info(DMI_BIOS_VENDOR),
2753                      dmi_get_system_info(DMI_BIOS_VERSION),
2754                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2755                 return -EIO;
2756         }
2757
2758         return iommu_domain_identity_map(domain, start, end);
2759 }
2760
2761 static int iommu_prepare_identity_map(struct device *dev,
2762                                       unsigned long long start,
2763                                       unsigned long long end)
2764 {
2765         struct dmar_domain *domain;
2766         int ret;
2767
2768         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2769         if (!domain)
2770                 return -ENOMEM;
2771
2772         ret = domain_prepare_identity_map(dev, domain, start, end);
2773         if (ret)
2774                 domain_exit(domain);
2775
2776         return ret;
2777 }
2778
2779 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2780                                          struct device *dev)
2781 {
2782         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2783                 return 0;
2784         return iommu_prepare_identity_map(dev, rmrr->base_address,
2785                                           rmrr->end_address);
2786 }
2787
2788 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2789 static inline void iommu_prepare_isa(void)
2790 {
2791         struct pci_dev *pdev;
2792         int ret;
2793
2794         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2795         if (!pdev)
2796                 return;
2797
2798         pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2799         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2800
2801         if (ret)
2802                 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2803
2804         pci_dev_put(pdev);
2805 }
2806 #else
2807 static inline void iommu_prepare_isa(void)
2808 {
2809         return;
2810 }
2811 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
2812
2813 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2814
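/*
 * Create the static identity (si) domain and, unless hardware passthrough
 * is in use, 1:1 map every usable physical memory range into it.
 */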
2815 static int __init si_domain_init(int hw)
2816 {
2817         int nid, ret;
2818
2819         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2820         if (!si_domain)
2821                 return -EFAULT;
2822
2823         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2824                 domain_exit(si_domain);
2825                 return -EFAULT;
2826         }
2827
2828         pr_debug("Identity mapping domain allocated\n");
2829
2830         if (hw)
2831                 return 0;
2832
2833         for_each_online_node(nid) {
2834                 unsigned long start_pfn, end_pfn;
2835                 int i;
2836
2837                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2838                         ret = iommu_domain_identity_map(si_domain,
2839                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2840                         if (ret)
2841                                 return ret;
2842                 }
2843         }
2844
2845         return 0;
2846 }
2847
2848 static int identity_mapping(struct device *dev)
2849 {
2850         struct device_domain_info *info;
2851
2852         if (likely(!iommu_identity_mapping))
2853                 return 0;
2854
2855         info = dev->archdata.iommu;
2856         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2857                 return (info->domain == si_domain);
2858
2859         return 0;
2860 }
2861
2862 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2863 {
2864         struct dmar_domain *ndomain;
2865         struct intel_iommu *iommu;
2866         u8 bus, devfn;
2867
2868         iommu = device_to_iommu(dev, &bus, &devfn);
2869         if (!iommu)
2870                 return -ENODEV;
2871
2872         ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2873         if (ndomain != domain)
2874                 return -EBUSY;
2875
2876         return 0;
2877 }
2878
2879 static bool device_has_rmrr(struct device *dev)
2880 {
2881         struct dmar_rmrr_unit *rmrr;
2882         struct device *tmp;
2883         int i;
2884
2885         rcu_read_lock();
2886         for_each_rmrr_units(rmrr) {
2887                 /*
2888                  * Return TRUE if this RMRR contains the device that
2889                  * is passed in.
2890                  */
2891                 for_each_active_dev_scope(rmrr->devices,
2892                                           rmrr->devices_cnt, i, tmp)
2893                         if (tmp == dev) {
2894                                 rcu_read_unlock();
2895                                 return true;
2896                         }
2897         }
2898         rcu_read_unlock();
2899         return false;
2900 }
2901
2902 /*
2903  * There are a couple cases where we need to restrict the functionality of
2904  * devices associated with RMRRs.  The first is when evaluating a device for
2905  * identity mapping because problems exist when devices are moved in and out
2906  * of domains and their respective RMRR information is lost.  This means that
2907  * a device with associated RMRRs will never be in a "passthrough" domain.
2908  * The second is use of the device through the IOMMU API.  This interface
2909  * expects to have full control of the IOVA space for the device.  We cannot
2910  * satisfy both the requirement that RMRR access is maintained and have an
2911  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2912  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2913  * We therefore prevent devices associated with an RMRR from participating in
2914  * the IOMMU API, which eliminates them from device assignment.
2915  *
2916  * In both cases we assume that PCI USB devices with RMRRs have them largely
2917  * for historical reasons and that the RMRR space is not actively used post
2918  * boot.  This exclusion may change if vendors begin to abuse it.
2919  *
2920  * The same exception is made for graphics devices, with the requirement that
2921  * any use of the RMRR regions will be torn down before assigning the device
2922  * to a guest.
2923  */
2924 static bool device_is_rmrr_locked(struct device *dev)
2925 {
2926         if (!device_has_rmrr(dev))
2927                 return false;
2928
2929         if (dev_is_pci(dev)) {
2930                 struct pci_dev *pdev = to_pci_dev(dev);
2931
2932                 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2933                         return false;
2934         }
2935
2936         return true;
2937 }
2938
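/*
 * Decide whether @dev should live in the static identity (1:1) domain,
 * based on its RMRRs, trust status, device type and, after boot, its
 * DMA mask.
 */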
2939 static int iommu_should_identity_map(struct device *dev, int startup)
2940 {
2941         if (dev_is_pci(dev)) {
2942                 struct pci_dev *pdev = to_pci_dev(dev);
2943
2944                 if (device_is_rmrr_locked(dev))
2945                         return 0;
2946
2947                 /*
2948                  * Prevent any device marked as untrusted from getting
2949                  * placed into the statically identity mapping domain.
2950                  */
2951                 if (pdev->untrusted)
2952                         return 0;
2953
2954                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2955                         return 1;
2956
2957                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2958                         return 1;
2959
2960                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2961                         return 0;
2962
2963                 /*
2964                  * We want to start off with all devices in the 1:1 domain, and
2965                  * take them out later if we find they can't access all of memory.
2966                  *
2967                  * However, we can't do this for PCI devices behind bridges,
2968                  * because all PCI devices behind the same bridge will end up
2969                  * with the same source-id on their transactions.
2970                  *
2971                  * Practically speaking, we can't change things around for these
2972                  * devices at run-time, because we can't be sure there'll be no
2973                  * DMA transactions in flight for any of their siblings.
2974                  *
2975                  * So PCI devices (unless they're on the root bus) as well as
2976                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2977                  * the 1:1 domain, just in _case_ one of their siblings turns out
2978                  * not to be able to map all of memory.
2979                  */
2980                 if (!pci_is_pcie(pdev)) {
2981                         if (!pci_is_root_bus(pdev->bus))
2982                                 return 0;
2983                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2984                                 return 0;
2985                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2986                         return 0;
2987         } else {
2988                 if (device_has_rmrr(dev))
2989                         return 0;
2990         }
2991
2992         /*
2993          * At boot time, we don't yet know if devices will be 64-bit capable.
2994          * Assume that they will — if they turn out not to be, then we can
2995          * take them out of the 1:1 domain later.
2996          */
2997         if (!startup) {
2998                 /*
2999                  * If the device's dma_mask is less than the system's memory
3000                  * size then this is not a candidate for identity mapping.
3001                  */
3002                 u64 dma_mask = *dev->dma_mask;
3003
3004                 if (dev->coherent_dma_mask &&
3005                     dev->coherent_dma_mask < dma_mask)
3006                         dma_mask = dev->coherent_dma_mask;
3007
3008                 return dma_mask >= dma_get_required_mask(dev);
3009         }
3010
3011         return 1;
3012 }
3013
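/*
 * Illustrative sketch (not driver code): for the !startup case above the
 * decision reduces to a mask comparison.  Assuming a hypothetical device
 * with a 32-bit DMA mask on a machine with 8 GiB of RAM:
 *
 *      u64 dma_mask = DMA_BIT_MASK(32);                    0x00000000ffffffff
 *      u64 required = dma_get_required_mask(dev);       ~= 0x00000001ffffffff
 *
 * dma_mask >= required is false, so the device is not (or no longer) a
 * candidate for the static 1:1 domain and gets a normal remapping domain.
 */
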
3014 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
3015 {
3016         int ret;
3017
3018         if (!iommu_should_identity_map(dev, 1))
3019                 return 0;
3020
3021         ret = domain_add_dev_info(si_domain, dev);
3022         if (!ret)
3023                 dev_info(dev, "%s identity mapping\n",
3024                          hw ? "Hardware" : "Software");
3025         else if (ret == -ENODEV)
3026                 /* device not associated with an iommu */
3027                 ret = 0;
3028
3029         return ret;
3030 }
3031
3033 static int __init iommu_prepare_static_identity_mapping(int hw)
3034 {
3035         struct pci_dev *pdev = NULL;
3036         struct dmar_drhd_unit *drhd;
3037         struct intel_iommu *iommu;
3038         struct device *dev;
3039         int i;
3040         int ret = 0;
3041
3042         for_each_pci_dev(pdev) {
3043                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
3044                 if (ret)
3045                         return ret;
3046         }
3047
3048         for_each_active_iommu(iommu, drhd)
3049                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
3050                         struct acpi_device_physical_node *pn;
3051                         struct acpi_device *adev;
3052
3053                         if (dev->bus != &acpi_bus_type)
3054                                 continue;
3055
3056                         adev = to_acpi_device(dev);
3057                         mutex_lock(&adev->physical_node_lock);
3058                         list_for_each_entry(pn, &adev->physical_node_list, node) {
3059                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
3060                                 if (ret)
3061                                         break;
3062                         }
3063                         mutex_unlock(&adev->physical_node_lock);
3064                         if (ret)
3065                                 return ret;
3066                 }
3067
3068         return 0;
3069 }
3070
3071 static void intel_iommu_init_qi(struct intel_iommu *iommu)
3072 {
3073         /*
3074          * Start from a sane IOMMU hardware state.
3075          * If queued invalidation was already initialized by us
3076          * (for example, while enabling interrupt remapping), then
3077          * things are already rolling from a sane state.
3078          */
3079         if (!iommu->qi) {
3080                 /*
3081                  * Clear any previous faults.
3082                  */
3083                 dmar_fault(-1, iommu);
3084                 /*
3085                  * Disable queued invalidation if supported and already enabled
3086                  * before OS handover.
3087                  */
3088                 dmar_disable_qi(iommu);
3089         }
3090
3091         if (dmar_enable_qi(iommu)) {
3092                 /*
3093                  * Queued invalidation is not enabled; use register-based invalidation
3094                  */
3095                 iommu->flush.flush_context = __iommu_flush_context;
3096                 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3097                 pr_info("%s: Using Register based invalidation\n",
3098                         iommu->name);
3099         } else {
3100                 iommu->flush.flush_context = qi_flush_context;
3101                 iommu->flush.flush_iotlb = qi_flush_iotlb;
3102                 pr_info("%s: Using Queued invalidation\n", iommu->name);
3103         }
3104 }
3105
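/*
 * Illustrative sketch: whichever pair of callbacks was selected above is
 * always invoked through the same indirection, so callers never need to
 * know whether queued or register-based invalidation is in use.  This
 * mirrors the global flush issued later from init_dmars():
 *
 *      iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 *      iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 */
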
3106 static int copy_context_table(struct intel_iommu *iommu,
3107                               struct root_entry *old_re,
3108                               struct context_entry **tbl,
3109                               int bus, bool ext)
3110 {
3111         int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3112         struct context_entry *new_ce = NULL, ce;
3113         struct context_entry *old_ce = NULL;
3114         struct root_entry re;
3115         phys_addr_t old_ce_phys;
3116
3117         tbl_idx = ext ? bus * 2 : bus;
3118         memcpy(&re, old_re, sizeof(re));
3119
3120         for (devfn = 0; devfn < 256; devfn++) {
3121                 /* First calculate the correct index */
3122                 idx = (ext ? devfn * 2 : devfn) % 256;
3123
3124                 if (idx == 0) {
3125                         /* First save what we may have and clean up */
3126                         if (new_ce) {
3127                                 tbl[tbl_idx] = new_ce;
3128                                 __iommu_flush_cache(iommu, new_ce,
3129                                                     VTD_PAGE_SIZE);
3130                                 pos = 1;
3131                         }
3132
3133                         if (old_ce)
3134                                 memunmap(old_ce);
3135
3136                         ret = 0;
3137                         if (devfn < 0x80)
3138                                 old_ce_phys = root_entry_lctp(&re);
3139                         else
3140                                 old_ce_phys = root_entry_uctp(&re);
3141
3142                         if (!old_ce_phys) {
3143                                 if (ext && devfn == 0) {
3144                                         /* No LCTP, try UCTP */
3145                                         devfn = 0x7f;
3146                                         continue;
3147                                 } else {
3148                                         goto out;
3149                                 }
3150                         }
3151
3152                         ret = -ENOMEM;
3153                         old_ce = memremap(old_ce_phys, PAGE_SIZE,
3154                                         MEMREMAP_WB);
3155                         if (!old_ce)
3156                                 goto out;
3157
3158                         new_ce = alloc_pgtable_page(iommu->node);
3159                         if (!new_ce)
3160                                 goto out_unmap;
3161
3162                         ret = 0;
3163                 }
3164
3165                 /* Now copy the context entry */
3166                 memcpy(&ce, old_ce + idx, sizeof(ce));
3167
3168                 if (!__context_present(&ce))
3169                         continue;
3170
3171                 did = context_domain_id(&ce);
3172                 if (did >= 0 && did < cap_ndoms(iommu->cap))
3173                         set_bit(did, iommu->domain_ids);
3174
3175                 /*
3176                  * We need a marker for copied context entries. This
3177                  * marker needs to work for the old format as well as
3178                  * for extended context entries.
3179                  *
3180                  * Bit 67 of the context entry is used. In the old
3181                  * format this bit is available to software, in the
3182                  * extended format it is the PGE bit, but PGE is ignored
3183                  * by HW if PASIDs are disabled (and thus still
3184                  * available).
3185                  *
3186                  * So disable PASIDs first and then mark the entry
3187                  * copied. This means that we don't copy PASID
3188                  * translations from the old kernel, but this is fine as
3189                  * faults there are not fatal.
3190                  */
3191                 context_clear_pasid_enable(&ce);
3192                 context_set_copied(&ce);
3193
3194                 new_ce[idx] = ce;
3195         }
3196
3197         tbl[tbl_idx + pos] = new_ce;
3198
3199         __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3200
3201 out_unmap:
3202         memunmap(old_ce);
3203
3204 out:
3205         return ret;
3206 }
3207
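/*
 * Illustrative worked example of the index arithmetic above, for a
 * hypothetical extended-mode entry at bus 3, devfn 0x81:
 *
 *      tbl_idx = ext ? 3 * 2 : 3;                  == 6
 *      idx     = (ext ? 0x81 * 2 : 0x81) % 256;    == 2
 *
 * Since devfn >= 0x80 the entry is read through the old kernel's upper
 * context-table pointer (UCTP) and ends up at index 2 of the table stored
 * in tbl[tbl_idx + 1] == tbl[7]; devfns below 0x80 land in the
 * LCTP-derived table at tbl[6].
 */
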
3208 static int copy_translation_tables(struct intel_iommu *iommu)
3209 {
3210         struct context_entry **ctxt_tbls;
3211         struct root_entry *old_rt;
3212         phys_addr_t old_rt_phys;
3213         int ctxt_table_entries;
3214         unsigned long flags;
3215         u64 rtaddr_reg;
3216         int bus, ret;
3217         bool new_ext, ext;
3218
3219         rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3220         ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
3221         new_ext    = !!ecap_ecs(iommu->ecap);
3222
3223         /*
3224          * The RTT bit can only be changed when translation is disabled,
3225          * but disabling translation means to open a window for data
3226          * corruption. So bail out and don't copy anything if we would
3227          * have to change the bit.
3228          */
3229         if (new_ext != ext)
3230                 return -EINVAL;
3231
3232         old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3233         if (!old_rt_phys)
3234                 return -EINVAL;
3235
3236         old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3237         if (!old_rt)
3238                 return -ENOMEM;
3239
3240         /* This is too big for the stack - allocate it from slab */
3241         ctxt_table_entries = ext ? 512 : 256;
3242         ret = -ENOMEM;
3243         ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3244         if (!ctxt_tbls)
3245                 goto out_unmap;
3246
3247         for (bus = 0; bus < 256; bus++) {
3248                 ret = copy_context_table(iommu, &old_rt[bus],
3249                                          ctxt_tbls, bus, ext);
3250                 if (ret) {
3251                         pr_err("%s: Failed to copy context table for bus %d\n",
3252                                 iommu->name, bus);
3253                         continue;
3254                 }
3255         }
3256
3257         spin_lock_irqsave(&iommu->lock, flags);
3258
3259         /* Context tables are copied, now write them to the root_entry table */
3260         for (bus = 0; bus < 256; bus++) {
3261                 int idx = ext ? bus * 2 : bus;
3262                 u64 val;
3263
3264                 if (ctxt_tbls[idx]) {
3265                         val = virt_to_phys(ctxt_tbls[idx]) | 1;
3266                         iommu->root_entry[bus].lo = val;
3267                 }
3268
3269                 if (!ext || !ctxt_tbls[idx + 1])
3270                         continue;
3271
3272                 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3273                 iommu->root_entry[bus].hi = val;
3274         }
3275
3276         spin_unlock_irqrestore(&iommu->lock, flags);
3277
3278         kfree(ctxt_tbls);
3279
3280         __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3281
3282         ret = 0;
3283
3284 out_unmap:
3285         memunmap(old_rt);
3286
3287         return ret;
3288 }
3289
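/*
 * Illustrative sketch (hypothetical address): each root entry programmed
 * above holds the physical address of a copied context table, with bit 0
 * acting as the present bit:
 *
 *      val = virt_to_phys(ctxt_tbls[idx]) | 1;     e.g. 0x12345000 | 1
 *      iommu->root_entry[bus].lo = val;
 *
 * In the extended format .lo covers devfns 0x00-0x7f and .hi covers
 * devfns 0x80-0xff, which is why ctxt_tbls[] holds two tables per bus.
 */
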
3290 static int __init init_dmars(void)
3291 {
3292         struct dmar_drhd_unit *drhd;
3293         struct dmar_rmrr_unit *rmrr;
3294         bool copied_tables = false;
3295         struct device *dev;
3296         struct intel_iommu *iommu;
3297         int i, ret;
3298
3299         /*
3300          * for each drhd
3301          *    allocate root
3302          *    initialize and program root entry to not present
3303          * endfor
3304          */
3305         for_each_drhd_unit(drhd) {
3306                 /*
3307                  * Lock not needed as this is only incremented in the
3308                  * single-threaded kernel __init code path; all other
3309                  * accesses are read-only.
3310                  */
3311                 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3312                         g_num_of_iommus++;
3313                         continue;
3314                 }
3315                 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3316         }
3317
3318         /* Preallocate enough resources for IOMMU hot-addition */
3319         if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3320                 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3321
3322         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3323                         GFP_KERNEL);
3324         if (!g_iommus) {
3325                 pr_err("Allocating global iommu array failed\n");
3326                 ret = -ENOMEM;
3327                 goto error;
3328         }
3329
3330         for_each_active_iommu(iommu, drhd) {
3331                 /*
3332                  * Find the max PASID size of all IOMMUs in the system.
3333                  * We need to ensure the system pasid table is no bigger
3334                  * than the smallest supported.
3335                  */
3336                 if (pasid_supported(iommu)) {
3337                         u32 temp = 2 << ecap_pss(iommu->ecap);
3338
3339                         intel_pasid_max_id = min_t(u32, temp,
3340                                                    intel_pasid_max_id);
3341                 }
3342
3343                 g_iommus[iommu->seq_id] = iommu;
3344
3345                 intel_iommu_init_qi(iommu);
3346
3347                 ret = iommu_init_domains(iommu);
3348                 if (ret)
3349                         goto free_iommu;
3350
3351                 init_translation_status(iommu);
3352
3353                 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3354                         iommu_disable_translation(iommu);
3355                         clear_translation_pre_enabled(iommu);
3356                         pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3357                                 iommu->name);
3358                 }
3359
3360                 /*
3361                  * TBD:
3362                  * we could share the same root & context tables
3363                  * among all IOMMUs. This needs to be split out later.
3364                  */
3365                 ret = iommu_alloc_root_entry(iommu);
3366                 if (ret)
3367                         goto free_iommu;
3368
3369                 if (translation_pre_enabled(iommu)) {
3370                         pr_info("Translation already enabled - trying to copy translation structures\n");
3371
3372                         ret = copy_translation_tables(iommu);
3373                         if (ret) {
3374                                 /*
3375                                  * We found the IOMMU with translation
3376                                  * enabled - but failed to copy over the
3377                                  * old root-entry table. Try to proceed
3378                                  * by disabling translation now and
3379                                  * allocating a clean root-entry table.
3380                                  * This might cause DMAR faults, but
3381                                  * probably the dump will still succeed.
3382                                  */
3383                                 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3384                                        iommu->name);
3385                                 iommu_disable_translation(iommu);
3386                                 clear_translation_pre_enabled(iommu);
3387                         } else {
3388                                 pr_info("Copied translation tables from previous kernel for %s\n",
3389                                         iommu->name);
3390                                 copied_tables = true;
3391                         }
3392                 }
3393
3394                 if (!ecap_pass_through(iommu->ecap))
3395                         hw_pass_through = 0;
3396 #ifdef CONFIG_INTEL_IOMMU_SVM
3397                 if (pasid_supported(iommu))
3398                         intel_svm_init(iommu);
3399 #endif
3400         }
3401
3402         /*
3403          * Now that qi is enabled on all iommus, set the root entry and flush
3404          * caches. This is required on some Intel X58 chipsets, otherwise the
3405          * flush_context function will loop forever and the boot hangs.
3406          */
3407         for_each_active_iommu(iommu, drhd) {
3408                 iommu_flush_write_buffer(iommu);
3409                 iommu_set_root_entry(iommu);
3410                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3411                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3412         }
3413
3414         if (iommu_pass_through)
3415                 iommu_identity_mapping |= IDENTMAP_ALL;
3416
3417 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3418         dmar_map_gfx = 0;
3419 #endif
3420
3421         if (!dmar_map_gfx)
3422                 iommu_identity_mapping |= IDENTMAP_GFX;
3423
3424         check_tylersburg_isoch();
3425
3426         if (iommu_identity_mapping) {
3427                 ret = si_domain_init(hw_pass_through);
3428                 if (ret)
3429                         goto free_iommu;
3430         }
3431
3433         /*
3434          * If we copied translations from a previous kernel in the kdump
3435          * case, we can not assign the devices to domains now, as that
3436          * would eliminate the old mappings. So skip this part and defer
3437          * the assignment to device driver initialization time.
3438          */
3439         if (copied_tables)
3440                 goto domains_done;
3441
3442         /*
3443          * If identity mapping is enabled, set up static identity mappings
3444          * (hardware pass-through where available) for the devices selected
3445          * by iommu_should_identity_map().
3446          */
3447         if (iommu_identity_mapping) {
3448                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3449                 if (ret) {
3450                         pr_crit("Failed to setup IOMMU pass-through\n");
3451                         goto free_iommu;
3452                 }
3453         }
3454         /*
3455          * For each rmrr
3456          *   for each dev attached to rmrr
3457          *   do
3458          *     locate drhd for dev, alloc domain for dev
3459          *     allocate free domain
3460          *     allocate page table entries for rmrr
3461          *     if context not allocated for bus
3462          *           allocate and init context
3463          *           set present in root table for this bus
3464          *     init context with domain, translation etc
3465          *    endfor
3466          * endfor
3467          */
3468         pr_info("Setting RMRR:\n");
3469         for_each_rmrr_units(rmrr) {
3470                 /* some BIOSes list non-existent devices in the DMAR table. */
3471                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3472                                           i, dev) {
3473                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
3474                         if (ret)
3475                                 pr_err("Mapping reserved region failed\n");
3476                 }
3477         }
3478
3479         iommu_prepare_isa();
3480
3481 domains_done:
3482
3483         /*
3484          * for each drhd
3485          *   enable fault log
3486          *   global invalidate context cache
3487          *   global invalidate iotlb
3488          *   enable translation
3489          */
3490         for_each_iommu(iommu, drhd) {
3491                 if (drhd->ignored) {
3492                         /*
3493                          * we always have to disable PMRs or DMA may fail on
3494                          * this device
3495                          */
3496                         if (force_on)
3497                                 iommu_disable_protect_mem_regions(iommu);
3498                         continue;
3499                 }
3500
3501                 iommu_flush_write_buffer(iommu);
3502
3503 #ifdef CONFIG_INTEL_IOMMU_SVM
3504                 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3505                         /*
3506                          * Call dmar_alloc_hwirq() with dmar_global_lock held,
3507                          * could cause possible lock race condition.
3508                          */
3509                         up_write(&dmar_global_lock);
3510                         ret = intel_svm_enable_prq(iommu);
3511                         down_write(&dmar_global_lock);
3512                         if (ret)
3513                                 goto free_iommu;
3514                 }
3515 #endif
3516                 ret = dmar_set_interrupt(iommu);
3517                 if (ret)
3518                         goto free_iommu;
3519
3520                 if (!translation_pre_enabled(iommu))
3521                         iommu_enable_translation(iommu);
3522
3523                 iommu_disable_protect_mem_regions(iommu);
3524         }
3525
3526         return 0;
3527
3528 free_iommu:
3529         for_each_active_iommu(iommu, drhd) {
3530                 disable_dmar_iommu(iommu);
3531                 free_dmar_iommu(iommu);
3532         }
3533
3534         kfree(g_iommus);
3535
3536 error:
3537         return ret;
3538 }
3539
3540 /* This takes a number of _MM_ pages, not VTD pages */
3541 static unsigned long intel_alloc_iova(struct device *dev,
3542                                      struct dmar_domain *domain,
3543                                      unsigned long nrpages, uint64_t dma_mask)
3544 {
3545         unsigned long iova_pfn;
3546
3547         /* Restrict dma_mask to the width that the iommu can handle */
3548         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3549         /* Ensure we reserve the whole size-aligned region */
3550         nrpages = __roundup_pow_of_two(nrpages);
3551
3552         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3553                 /*
3554                  * First try to allocate an I/O virtual address below
3555                  * DMA_BIT_MASK(32); if that fails, allocate from the
3556                  * higher range.
3557                  */
3558                 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3559                                            IOVA_PFN(DMA_BIT_MASK(32)), false);
3560                 if (iova_pfn)
3561                         return iova_pfn;
3562         }
3563         iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3564                                    IOVA_PFN(dma_mask), true);
3565         if (unlikely(!iova_pfn)) {
3566                 dev_err(dev, "Allocating %ld-page iova failed\n", nrpages);
3567                 return 0;
3568         }
3569
3570         return iova_pfn;
3571 }
3572
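/*
 * Illustrative sketch mirroring the call sites below: a byte range is
 * first converted to VT-d pages, the IOVA is requested in MM pages, and
 * the returned MM pfn is converted back before the actual mapping:
 *
 *      size = aligned_nrpages(paddr, size);
 *      iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
 *                                  dma_mask);
 *      if (iova_pfn)
 *              ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
 *                                       mm_to_dma_pfn(paddr_pfn), size, prot);
 */
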
3573 struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3574 {
3575         struct dmar_domain *domain, *tmp;
3576         struct dmar_rmrr_unit *rmrr;
3577         struct device *i_dev;
3578         int i, ret;
3579
3580         domain = find_domain(dev);
3581         if (domain)
3582                 goto out;
3583
3584         domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3585         if (!domain)
3586                 goto out;
3587
3588         /* We have a new domain - setup possible RMRRs for the device */
3589         rcu_read_lock();
3590         for_each_rmrr_units(rmrr) {
3591                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3592                                           i, i_dev) {
3593                         if (i_dev != dev)
3594                                 continue;
3595
3596                         ret = domain_prepare_identity_map(dev, domain,
3597                                                           rmrr->base_address,
3598                                                           rmrr->end_address);
3599                         if (ret)
3600                                 dev_err(dev, "Mapping reserved region failed\n");
3601                 }
3602         }
3603         rcu_read_unlock();
3604
3605         tmp = set_domain_for_dev(dev, domain);
3606         if (!tmp || domain != tmp) {
3607                 domain_exit(domain);
3608                 domain = tmp;
3609         }
3610
3611 out:
3613         if (!domain)
3614                 dev_err(dev, "Allocating domain failed\n");
3615
3617         return domain;
3618 }
3619
3620 /* Check if the device needs to go through the non-identity map/unmap process. */
3621 static bool iommu_need_mapping(struct device *dev)
3622 {
3623         int found;
3624
3625         if (iommu_dummy(dev))
3626                 return false;
3627
3628         if (!iommu_identity_mapping)
3629                 return true;
3630
3631         found = identity_mapping(dev);
3632         if (found) {
3633                 if (iommu_should_identity_map(dev, 0))
3634                         return false;
3635
3636                 /*
3637                  * A device limited to 32-bit DMA is removed from si_domain
3638                  * and falls back to non-identity mapping.
3639                  */
3640                 dmar_remove_one_dev_info(dev);
3641                 dev_info(dev, "32bit DMA uses non-identity mapping\n");
3642         } else {
3643                 /*
3644                  * If a 64-bit DMA-capable device was detached from a VM,
3645                  * it is put back into si_domain for identity mapping.
3646                  */
3647                 if (iommu_should_identity_map(dev, 0) &&
3648                     !domain_add_dev_info(si_domain, dev)) {
3649                         dev_info(dev, "64bit DMA uses identity mapping\n");
3650                         return false;
3651                 }
3652         }
3653
3654         return true;
3655 }
3656
3657 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3658                                      size_t size, int dir, u64 dma_mask)
3659 {
3660         struct dmar_domain *domain;
3661         phys_addr_t start_paddr;
3662         unsigned long iova_pfn;
3663         int prot = 0;
3664         int ret;
3665         struct intel_iommu *iommu;
3666         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3667
3668         BUG_ON(dir == DMA_NONE);
3669
3670         domain = get_valid_domain_for_dev(dev);
3671         if (!domain)
3672                 return DMA_MAPPING_ERROR;
3673
3674         iommu = domain_get_iommu(domain);
3675         size = aligned_nrpages(paddr, size);
3676
3677         iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3678         if (!iova_pfn)
3679                 goto error;
3680
3681         /*
3682          * Check if DMAR supports zero-length reads on write-only
3683          * mappings.
3684          */
3685         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3686                         !cap_zlr(iommu->cap))
3687                 prot |= DMA_PTE_READ;
3688         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3689                 prot |= DMA_PTE_WRITE;
3690         /*
3691          * The range paddr .. paddr + size might cover only part of a page;
3692          * map the whole page.  Note: if two parts of one page are mapped
3693          * separately, we may end up with two guest addresses mapping to the
3694          * same host paddr, but this is not a big problem.
3695          */
3696         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3697                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3698         if (ret)
3699                 goto error;
3700
3701         start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3702         start_paddr += paddr & ~PAGE_MASK;
3703         return start_paddr;
3704
3705 error:
3706         if (iova_pfn)
3707                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3708         dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
3709                 size, (unsigned long long)paddr, dir);
3710         return DMA_MAPPING_ERROR;
3711 }
3712
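/*
 * Illustrative worked example (hypothetical numbers) of the address
 * arithmetic above: mapping paddr 0x12345678 with size 0x100 needs a
 * single VT-d page, and the page offset survives in the returned handle:
 *
 *      size  = aligned_nrpages(0x12345678, 0x100);       == 1
 *      (assume intel_alloc_iova() hands back iova_pfn == 0xabcd0)
 *      start = ((phys_addr_t)0xabcd0 << PAGE_SHIFT)
 *              + (0x12345678 & ~PAGE_MASK);              == 0xabcd0678
 */
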
3713 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3714                                  unsigned long offset, size_t size,
3715                                  enum dma_data_direction dir,
3716                                  unsigned long attrs)
3717 {
3718         if (iommu_need_mapping(dev))
3719                 return __intel_map_single(dev, page_to_phys(page) + offset,
3720                                 size, dir, *dev->dma_mask);
3721         return dma_direct_map_page(dev, page, offset, size, dir, attrs);
3722 }
3723
3724 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
3725                                      size_t size, enum dma_data_direction dir,
3726                                      unsigned long attrs)
3727 {
3728         if (iommu_need_mapping(dev))
3729                 return __intel_map_single(dev, phys_addr, size, dir,
3730                                 *dev->dma_mask);
3731         return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
3732 }
3733
3734 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3735 {
3736         struct dmar_domain *domain;
3737         unsigned long start_pfn, last_pfn;
3738         unsigned long nrpages;
3739         unsigned long iova_pfn;
3740         struct intel_iommu *iommu;
3741         struct page *freelist;
3742         struct pci_dev *pdev = NULL;
3743
3744         domain = find_domain(dev);
3745         BUG_ON(!domain);
3746
3747         iommu = domain_get_iommu(domain);
3748
3749         iova_pfn = IOVA_PFN(dev_addr);
3750
3751         nrpages = aligned_nrpages(dev_addr, size);
3752         start_pfn = mm_to_dma_pfn(iova_pfn);
3753         last_pfn = start_pfn + nrpages - 1;
3754
3755         if (dev_is_pci(dev))
3756                 pdev = to_pci_dev(dev);
3757
3758         dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
3759
3760         freelist = domain_unmap(domain, start_pfn, last_pfn);
3761
3762         if (intel_iommu_strict || (pdev && pdev->untrusted)) {
3763                 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3764                                       nrpages, !freelist, 0);
3765                 /* free iova */
3766                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3767                 dma_free_pagelist(freelist);
3768         } else {
3769                 queue_iova(&domain->iovad, iova_pfn, nrpages,
3770                            (unsigned long)freelist);
3771                 /*
3772                  * Queue up the release of the unmap to save roughly 1/6th
3773                  * of the CPU time otherwise spent on the iotlb flush.
3774                  */
3775         }
3776 }
3777
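/*
 * Illustrative note: the two paths above trade safety for speed.  With
 * intel_iommu=strict on the kernel command line, or for an untrusted
 * (externally facing) PCI device, the IOTLB flush and IOVA release are
 * synchronous:
 *
 *      iommu_flush_iotlb_psi(iommu, domain, start_pfn, nrpages, !freelist, 0);
 *      free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
 *
 * otherwise queue_iova() defers both to the IOVA flush-queue machinery.
 */
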
3778 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3779                              size_t size, enum dma_data_direction dir,
3780                              unsigned long attrs)
3781 {
3782         if (iommu_need_mapping(dev))
3783                 intel_unmap(dev, dev_addr, size);
3784         else
3785                 dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
3786 }
3787
3788 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
3789                 size_t size, enum dma_data_direction dir, unsigned long attrs)
3790 {
3791         if (iommu_need_mapping(dev))
3792                 intel_unmap(dev, dev_addr, size);
3793 }
3794
3795 static void *intel_alloc_coherent(struct device *dev, size_t size,
3796                                   dma_addr_t *dma_handle, gfp_t flags,
3797                                   unsigned long attrs)
3798 {
3799         struct page *page = NULL;
3800         int order;
3801
3802         if (!iommu_need_mapping(dev))
3803                 return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
3804
3805         size = PAGE_ALIGN(size);
3806         order = get_order(size);
3807
3808         if (gfpflags_allow_blocking(flags)) {
3809                 unsigned int count = size >> PAGE_SHIFT;
3810
3811                 page = dma_alloc_from_contiguous(dev, count, order,
3812                                                  flags & __GFP_NOWARN);
3813         }
3814
3815         if (!page)
3816                 page = alloc_pages(flags, order);
3817         if (!page)
3818                 return NULL;
3819         memset(page_address(page), 0, size);
3820
3821         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3822                                          DMA_BIDIRECTIONAL,
3823                                          dev->coherent_dma_mask);
3824         if (*dma_handle != DMA_MAPPING_ERROR)
3825                 return page_address(page);
3826         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3827                 __free_pages(page, order);
3828
3829         return NULL;
3830 }
3831
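/*
 * Illustrative sketch (hypothetical driver code): coherent buffers reach
 * the allocator above through the generic DMA API once intel_dma_ops is
 * in use for the device:
 *
 *      dma_addr_t dma_handle;
 *      void *buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *                                     &dma_handle, GFP_KERNEL);
 *      ...
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, dma_handle);
 */
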
3832 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3833                                 dma_addr_t dma_handle, unsigned long attrs)
3834 {
3835         int order;
3836         struct page *page = virt_to_page(vaddr);
3837
3838         if (!iommu_need_mapping(dev))
3839                 return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
3840
3841         size = PAGE_ALIGN(size);
3842         order = get_order(size);
3843
3844         intel_unmap(dev, dma_handle, size);
3845         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3846                 __free_pages(page, order);
3847 }
3848
3849 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3850                            int nelems, enum dma_data_direction dir,
3851                            unsigned long attrs)
3852 {
3853         dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3854         unsigned long nrpages = 0;
3855         struct scatterlist *sg;
3856         int i;
3857
3858         if (!iommu_need_mapping(dev))
3859                 return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
3860
3861         for_each_sg(sglist, sg, nelems, i) {
3862                 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3863         }
3864
3865         intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3866 }
3867
3868 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3869                         enum dma_data_direction dir, unsigned long attrs)
3870 {
3871         int i;
3872         struct dmar_domain *domain;
3873         size_t size = 0;
3874         int prot = 0;
3875         unsigned long iova_pfn;
3876         int ret;
3877         struct scatterlist *sg;
3878         unsigned long start_vpfn;
3879         struct intel_iommu *iommu;
3880
3881         BUG_ON(dir == DMA_NONE);
3882         if (!iommu_need_mapping(dev))
3883                 return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
3884
3885         domain = get_valid_domain_for_dev(dev);
3886         if (!domain)
3887                 return 0;
3888
3889         iommu = domain_get_iommu(domain);
3890
3891         for_each_sg(sglist, sg, nelems, i)
3892                 size += aligned_nrpages(sg->offset, sg->length);
3893
3894         iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3895                                 *dev->dma_mask);
3896         if (!iova_pfn) {
3897                 sglist->dma_length = 0;
3898                 return 0;
3899         }
3900
3901         /*
3902          * Check if DMAR supports zero-length reads on write-only
3903          * mappings.
3904          */
3905         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3906                         !cap_zlr(iommu->cap))
3907                 prot |= DMA_PTE_READ;
3908         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3909                 prot |= DMA_PTE_WRITE;
3910
3911         start_vpfn = mm_to_dma_pfn(iova_pfn);
3912
3913         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3914         if (unlikely(ret)) {
3915                 dma_pte_free_pagetable(domain, start_vpfn,
3916                                        start_vpfn + size - 1,
3917                                        agaw_to_level(domain->agaw) + 1);
3918                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3919                 return 0;
3920         }
3921
3922         return nelems;
3923 }
3924
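/*
 * Illustrative worked example (hypothetical segment sizes): the size
 * accumulated above is in VT-d pages, one aligned_nrpages() contribution
 * per scatterlist segment.  For a two-segment list (offset, length):
 *
 *      aligned_nrpages(0x000, 0x1800) == 2
 *      aligned_nrpages(0xf00, 0x100)  == 1
 *      size == 3   pages of contiguous IOVA space for the whole list
 */
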
3925 static const struct dma_map_ops intel_dma_ops = {
3926         .alloc = intel_alloc_coherent,
3927         .free = intel_free_coherent,
3928         .map_sg = intel_map_sg,
3929         .unmap_sg = intel_unmap_sg,
3930         .map_page = intel_map_page,
3931         .unmap_page = intel_unmap_page,
3932         .map_resource = intel_map_resource,
3933         .unmap_resource = intel_unmap_resource,
3934         .dma_supported = dma_direct_supported,
3935 };
3936
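/*
 * Illustrative sketch (hypothetical driver code): with intel_dma_ops
 * installed as the device's dma_map_ops, ordinary DMA API calls are
 * dispatched to the functions above, e.g. dma_map_single() lands in
 * intel_map_page() and dma_unmap_single() in intel_unmap_page():
 *
 *      dma_addr_t dma = dma_map_single(&pdev->dev, cpu_buf, len,
 *                                      DMA_TO_DEVICE);
 *      if (dma_mapping_error(&pdev->dev, dma))
 *              return -ENOMEM;
 *      ... hand 'dma' to the hardware ...
 *      dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
 */
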
3937 static inline int iommu_domain_cache_init(void)
3938 {
3939         int ret = 0;
3940
3941         iommu_domain_cache = kmem_cache_create("iommu_domain",
3942                                          sizeof(struct dmar_domain),
3943                                          0,
3944                                          SLAB_HWCACHE_ALIGN,
3946                                          NULL);
3947         if (!iommu_domain_cache) {
3948                 pr_err("Couldn't create iommu_domain cache\n");
3949                 ret = -ENOMEM;
3950         }
3951
3952         return ret;
3953 }
3954
3955 static inline int iommu_devinfo_cache_init(void)
3956 {
3957         int ret = 0;
3958
3959         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3960                                          sizeof(struct device_domain_info),
3961                                          0,
3962                                          SLAB_HWCACHE_ALIGN,
3963                                          NULL);
3964         if (!iommu_devinfo_cache) {
3965                 pr_err("Couldn't create devinfo cache\n");
3966                 ret = -ENOMEM;
3967         }
3968
3969         return ret;
3970 }
3971
3972 static int __init iommu_init_mempool(void)
3973 {
3974         int ret;
3975         ret = iova_cache_get();
3976         if (ret)
3977                 return ret;
3978
3979         ret = iommu_domain_cache_init();
3980         if (ret)
3981                 goto domain_error;
3982
3983         ret = iommu_devinfo_cache_init();
3984         if (!ret)
3985                 return ret;
3986
3987         kmem_cache_destroy(iommu_domain_cache);
3988 domain_error:
3989         iova_cache_put();
3990
3991         return -ENOMEM;
3992 }
3993
3994 static void __init iommu_exit_mempool(void)
3995 {
3996         kmem_cache_destroy(iommu_devinfo_cache);
3997         kmem_cache_destroy(iommu_domain_cache);
3998         iova_cache_put();
3999 }
4000
4001 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
4002 {
4003         struct dmar_drhd_unit *drhd;
4004         u32 vtbar;
4005         int rc;
4006
4007         /* We know that this device on this chipset has its own IOMMU.
4008          * If we find it under a different IOMMU, then the BIOS is lying
4009          * to us. Hope that the IOMMU for this device is actually
4010          * disabled, and it needs no translation...
4011          */
4012         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4013         if (rc) {
4014                 /* "can't" happen */
4015                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4016                 return;
4017         }
4018         vtbar &= 0xffff0000;
4019
4020         /* we know that this iommu should be at offset 0xa000 from vtbar */
4021         drhd = dmar_find_matched_drhd_unit(pdev);
4022         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4023                             TAINT_FIRMWARE_WORKAROUND,
4024                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4025                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4026 }
4027 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4028
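/*
 * Illustrative sketch (hypothetical register value): if the config read
 * at offset 0xb0 returns 0xfbff1234, the quirk above reduces to:
 *
 *      vtbar = 0xfbff1234 & 0xffff0000;        == 0xfbff0000
 *      expected DRHD register base             == vtbar + 0xa000 == 0xfbffa000
 *
 * If dmar_find_matched_drhd_unit() returns any other unit, the BIOS has
 * mis-assigned the device and it is flagged with DUMMY_DEVICE_DOMAIN_INFO
 * so no translation is ever attempted for it.
 */
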
4029 static void __init init_no_remapping_devices(void)
4030 {
4031         struct dmar_drhd_unit *drhd;
4032         struct device *dev;
4033         int i;
4034
4035         for_each_drhd_unit(drhd) {
4036                 if (!drhd->include_all) {
4037                         for_each_active_dev_scope(drhd->devices,
4038                                                   drhd->devices_cnt, i, dev)
4039                                 break;
4040                         /* ignore DMAR unit if no devices exist */
4041                         if (i == drhd->devices_cnt)
4042                                 drhd->ignored = 1;
4043                 }
4044         }
4045
4046         for_each_active_drhd_unit(drhd) {
4047                 if (drhd->include_all)
4048                         continue;
4049
4050                 for_each_active_dev_scope(drhd->devices,
4051                                           drhd->devices_cnt, i, dev)
4052                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
4053                                 break;
4054                 if (i < drhd->devices_cnt)
4055                         continue;
4056
4057                 /* This IOMMU has *only* gfx devices. Either bypass it or
4058                    set the gfx_mapped flag, as appropriate */
4059                 if (!dmar_map_gfx) {
4060                         drhd->ignored = 1;
4061                         for_each_active_dev_scope(drhd->devices,
4062                                                   drhd->devices_cnt, i, dev)
4063                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4064                 }
4065         }
4066 }
4067
4068 #ifdef CONFIG_SUSPEND
4069 static int init_iommu_hw(void)
4070 {
4071         struct dmar_drhd_unit *drhd;
4072         struct intel_iommu *iommu = NULL;
4073
4074         for_each_active_iommu(iommu, drhd)
4075                 if (iommu->qi)
4076                         dmar_reenable_qi(iommu);
4077
4078         for_each_iommu(iommu, drhd) {
4079                 if (drhd->ignored) {
4080                         /*
4081                          * we always have to disable PMRs or DMA may fail on
4082                          * this device
4083                          */
4084                         if (force_on)
4085                                 iommu_disable_protect_mem_regions(iommu);
4086                         continue;
4087                 }
4088
4089                 iommu_flush_write_buffer(iommu);
4090
4091                 iommu_set_root_entry(iommu);
4092
4093                 iommu->flush.flush_context(iommu, 0, 0, 0,
4094                                            DMA_CCMD_GLOBAL_INVL);
4095                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4096                 iommu_enable_translation(iommu);
4097                 iommu_disable_protect_mem_regions(iommu);
4098         }
4099
4100         return 0;
4101 }
4102
4103 static void iommu_flush_all(void)
4104 {
4105         struct dmar_drhd_unit *drhd;
4106         struct intel_iommu *iommu;
4107
4108         for_each_active_iommu(iommu, drhd) {
4109                 iommu->flush.flush_context(iommu, 0, 0, 0,
4110                                            DMA_CCMD_GLOBAL_INVL);
4111                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4112                                          DMA_TLB_GLOBAL_FLUSH);
4113         }
4114 }
4115
4116 static int iommu_suspend(void)
4117 {
4118         struct dmar_drhd_unit *drhd;
4119         struct intel_iommu *iommu = NULL;
4120         unsigned long flag;
4121
4122         for_each_active_iommu(iommu, drhd) {
4123                 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
4124                                                  GFP_ATOMIC);
4125                 if (!iommu->iommu_state)
4126                         goto nomem;
4127         }
4128
4129         iommu_flush_all();
4130
4131         for_each_active_iommu(iommu, drhd) {
4132                 iommu_disable_translation(iommu);
4133
4134                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4135
4136                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4137                         readl(iommu->reg + DMAR_FECTL_REG);
4138                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4139                         readl(iommu->reg + DMAR_FEDATA_REG);
4140                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4141                         readl(iommu->reg + DMAR_FEADDR_REG);
4142                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4143                         readl(iommu->reg + DMAR_FEUADDR_REG);
4144
4145                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4146         }
4147         return 0;
4148
4149 nomem:
4150         for_each_active_iommu(iommu, drhd)
4151                 kfree(iommu->iommu_state);
4152
4153         return -ENOMEM;
4154 }
4155
4156 static void iommu_resume(void)
4157 {
4158         struct dmar_drhd_unit *drhd;
4159         struct intel_iommu *iommu = NULL;
4160         unsigned long flag;
4161
4162         if (init_iommu_hw()) {
4163                 if (force_on)
4164                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4165                 else
4166                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4167                 return;
4168         }
4169
4170         for_each_active_iommu(iommu, drhd) {
4171
4172                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4173
4174                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4175                         iommu->reg + DMAR_FECTL_REG);
4176                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4177                         iommu->reg + DMAR_FEDATA_REG);
4178                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4179                         iommu->reg + DMAR_FEADDR_REG);
4180                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4181                         iommu->reg + DMAR_FEUADDR_REG);
4182
4183                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4184         }
4185
4186         for_each_active_iommu(iommu, drhd)
4187                 kfree(iommu->iommu_state);
4188 }
4189
4190 static struct syscore_ops iommu_syscore_ops = {
4191         .resume         = iommu_resume,
4192         .suspend        = iommu_suspend,
4193 };
4194
4195 static void __init init_iommu_pm_ops(void)
4196 {
4197         register_syscore_ops(&iommu_syscore_ops);
4198 }
4199
4200 #else
4201 static inline void init_iommu_pm_ops(void) {}
4202 #endif  /* CONFIG_SUSPEND */
4203
4204
4205 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4206 {
4207         struct acpi_dmar_reserved_memory *rmrr;
4208         int prot = DMA_PTE_READ|DMA_PTE_WRITE;
4209         struct dmar_rmrr_unit *rmrru;
4210         size_t length;
4211
4212         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4213         if (!rmrru)
4214                 goto out;
4215
4216         rmrru->hdr = header;
4217         rmrr = (struct acpi_dmar_reserved_memory *)header;
4218         rmrru->base_address = rmrr->base_address;
4219         rmrru->end_address = rmrr->end_address;
4220
4221         length = rmrr->end_address - rmrr->base_address + 1;
4222         rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4223                                               IOMMU_RESV_DIRECT);
4224         if (!rmrru->resv)
4225                 goto free_rmrru;
4226
4227         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4228                                 ((void *)rmrr) + rmrr->header.length,
4229                                 &rmrru->devices_cnt);
4230         if (rmrru->devices_cnt && rmrru->devices == NULL)
4231                 goto free_all;
4232
4233         list_add(&rmrru->list, &dmar_rmrr_units);
4234
4235         return 0;
4236 free_all:
4237         kfree(rmrru->resv);
4238 free_rmrru:
4239         kfree(rmrru);
4240 out:
4241         return -ENOMEM;
4242 }
4243
4244 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4245 {
4246         struct dmar_atsr_unit *atsru;
4247         struct acpi_dmar_atsr *tmp;
4248
4249         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4250                 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4251                 if (atsr->segment != tmp->segment)
4252                         continue;
4253                 if (atsr->header.length != tmp->header.length)
4254                         continue;
4255                 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4256                         return atsru;
4257         }
4258
4259         return NULL;
4260 }
4261
4262 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4263 {
4264         struct acpi_dmar_atsr *atsr;
4265         struct dmar_atsr_unit *atsru;
4266
4267         if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4268                 return 0;
4269
4270         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4271         atsru = dmar_find_atsr(atsr);
4272         if (atsru)
4273                 return 0;
4274
4275         atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4276         if (!atsru)
4277                 return -ENOMEM;
4278
4279         /*
4280          * If memory is allocated from slab by ACPI _DSM method, we need to
4281          * copy the memory content because the memory buffer will be freed
4282          * on return.
4283          */
4284         atsru->hdr = (void *)(atsru + 1);
4285         memcpy(atsru->hdr, hdr, hdr->length);
4286         atsru->include_all = atsr->flags & 0x1;
4287         if (!atsru->include_all) {
4288                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4289                                 (void *)atsr + atsr->header.length,
4290                                 &atsru->devices_cnt);
4291                 if (atsru->devices_cnt && atsru->devices == NULL) {
4292                         kfree(atsru);
4293                         return -ENOMEM;
4294                 }
4295         }
4296
4297         list_add_rcu(&atsru->list, &dmar_atsr_units);
4298
4299         return 0;
4300 }
4301
4302 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4303 {
4304         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4305         kfree(atsru);
4306 }
4307
4308 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4309 {
4310         struct acpi_dmar_atsr *atsr;
4311         struct dmar_atsr_unit *atsru;
4312
4313         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4314         atsru = dmar_find_atsr(atsr);
4315         if (atsru) {
4316                 list_del_rcu(&atsru->list);
4317                 synchronize_rcu();
4318                 intel_iommu_free_atsr(atsru);
4319         }
4320
4321         return 0;
4322 }
4323
4324 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4325 {
4326         int i;
4327         struct device *dev;
4328         struct acpi_dmar_atsr *atsr;
4329         struct dmar_atsr_unit *atsru;
4330
4331         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4332         atsru = dmar_find_atsr(atsr);
4333         if (!atsru)
4334                 return 0;
4335
4336         if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4337                 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4338                                           i, dev)
4339                         return -EBUSY;
4340         }
4341
4342         return 0;
4343 }
4344
4345 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4346 {
4347         int sp, ret;
4348         struct intel_iommu *iommu = dmaru->iommu;
4349
4350         if (g_iommus[iommu->seq_id])
4351                 return 0;
4352
4353         if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4354                 pr_warn("%s: Doesn't support hardware pass through.\n",
4355                         iommu->name);
4356                 return -ENXIO;
4357         }
4358         if (!ecap_sc_support(iommu->ecap) &&
4359             domain_update_iommu_snooping(iommu)) {
4360                 pr_warn("%s: Doesn't support snooping.\n",
4361                         iommu->name);
4362                 return -ENXIO;
4363         }
4364         sp = domain_update_iommu_superpage(iommu) - 1;
4365         if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4366                 pr_warn("%s: Doesn't support large page.\n",
4367                         iommu->name);
4368                 return -ENXIO;
4369         }
4370
4371         /*
4372          * Disable translation if already enabled prior to OS handover.
4373          */
4374         if (iommu->gcmd & DMA_GCMD_TE)
4375                 iommu_disable_translation(iommu);
4376
4377         g_iommus[iommu->seq_id] = iommu;
4378         ret = iommu_init_domains(iommu);
4379         if (ret == 0)
4380                 ret = iommu_alloc_root_entry(iommu);
4381         if (ret)
4382                 goto out;
4383
4384 #ifdef CONFIG_INTEL_IOMMU_SVM
4385         if (pasid_supported(iommu))
4386                 intel_svm_init(iommu);
4387 #endif
4388
4389         if (dmaru->ignored) {
4390                 /*
4391                  * we always have to disable PMRs or DMA may fail on this device
4392                  */
4393                 if (force_on)
4394                         iommu_disable_protect_mem_regions(iommu);
4395                 return 0;
4396         }
4397
4398         intel_iommu_init_qi(iommu);
4399         iommu_flush_write_buffer(iommu);
4400
4401 #ifdef CONFIG_INTEL_IOMMU_SVM
4402         if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
4403                 ret = intel_svm_enable_prq(iommu);
4404                 if (ret)
4405                         goto disable_iommu;
4406         }
4407 #endif
4408         ret = dmar_set_interrupt(iommu);
4409         if (ret)
4410                 goto disable_iommu;
4411
4412         iommu_set_root_entry(iommu);
4413         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4414         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4415         iommu_enable_translation(iommu);
4416
4417         iommu_disable_protect_mem_regions(iommu);
4418         return 0;
4419
4420 disable_iommu:
4421         disable_dmar_iommu(iommu);
4422 out:
4423         free_dmar_iommu(iommu);
4424         return ret;
4425 }
4426
4427 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4428 {
4429         int ret = 0;
4430         struct intel_iommu *iommu = dmaru->iommu;
4431
4432         if (!intel_iommu_enabled)
4433                 return 0;
4434         if (iommu == NULL)
4435                 return -EINVAL;
4436
4437         if (insert) {
4438                 ret = intel_iommu_add(dmaru);
4439         } else {
4440                 disable_dmar_iommu(iommu);
4441                 free_dmar_iommu(iommu);
4442         }
4443
4444         return ret;
4445 }
4446
4447 static void intel_iommu_free_dmars(void)
4448 {
4449         struct dmar_rmrr_unit *rmrru, *rmrr_n;
4450         struct dmar_atsr_unit *atsru, *atsr_n;
4451
4452         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4453                 list_del(&rmrru->list);
4454                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4455                 kfree(rmrru->resv);
4456                 kfree(rmrru);
4457         }
4458
4459         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4460                 list_del(&atsru->list);
4461                 intel_iommu_free_atsr(atsru);
4462         }
4463 }
4464
4465 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4466 {
4467         int i, ret = 1;
4468         struct pci_bus *bus;
4469         struct pci_dev *bridge = NULL;
4470         struct device *tmp;
4471         struct acpi_dmar_atsr *atsr;
4472         struct dmar_atsr_unit *atsru;
4473
4474         dev = pci_physfn(dev);
4475         for (bus = dev->bus; bus; bus = bus->parent) {
4476                 bridge = bus->self;
4477                 /* If it's an integrated device, allow ATS */
4478                 if (!bridge)
4479                         return 1;
4480                 /* Connected via non-PCIe: no ATS */
4481                 if (!pci_is_pcie(bridge) ||
4482                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4483                         return 0;
4484                 /* If we found the root port, look it up in the ATSR */
4485                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4486                         break;
4487         }
4488
4489         rcu_read_lock();
4490         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4491                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4492                 if (atsr->segment != pci_domain_nr(dev->bus))
4493                         continue;
4494
4495                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4496                         if (tmp == &bridge->dev)
4497                                 goto out;
4498
4499                 if (atsru->include_all)
4500                         goto out;
4501         }
4502         ret = 0;
4503 out:
4504         rcu_read_unlock();
4505
4506         return ret;
4507 }
4508
4509 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4510 {
4511         int ret;
4512         struct dmar_rmrr_unit *rmrru;
4513         struct dmar_atsr_unit *atsru;
4514         struct acpi_dmar_atsr *atsr;
4515         struct acpi_dmar_reserved_memory *rmrr;
4516
4517         if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4518                 return 0;
4519
4520         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4521                 rmrr = container_of(rmrru->hdr,
4522                                     struct acpi_dmar_reserved_memory, header);
4523                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4524                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4525                                 ((void *)rmrr) + rmrr->header.length,
4526                                 rmrr->segment, rmrru->devices,
4527                                 rmrru->devices_cnt);
4528                         if (ret < 0)
4529                                 return ret;
4530                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4531                         dmar_remove_dev_scope(info, rmrr->segment,
4532                                 rmrru->devices, rmrru->devices_cnt);
4533                 }
4534         }
4535
4536         list_for_each_entry(atsru, &dmar_atsr_units, list) {
4537                 if (atsru->include_all)
4538                         continue;
4539
4540                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4541                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4542                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4543                                         (void *)atsr + atsr->header.length,
4544                                         atsr->segment, atsru->devices,
4545                                         atsru->devices_cnt);
4546                         if (ret > 0)
4547                                 break;
4548                         else if (ret < 0)
4549                                 return ret;
4550                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4551                         if (dmar_remove_dev_scope(info, atsr->segment,
4552                                         atsru->devices, atsru->devices_cnt))
4553                                 break;
4554                 }
4555         }
4556
4557         return 0;
4558 }
4559
4560 /*
4561  * Here we only respond to a device being unbound from its driver.
4562  *
4563  * A newly added device is not attached to its DMAR domain here yet; that
4564  * happens when the device is mapped to an IOVA.
4565  */
4566 static int device_notifier(struct notifier_block *nb,
4567                                   unsigned long action, void *data)
4568 {
4569         struct device *dev = data;
4570         struct dmar_domain *domain;
4571
4572         if (iommu_dummy(dev))
4573                 return 0;
4574
4575         if (action == BUS_NOTIFY_REMOVED_DEVICE) {
4576                 domain = find_domain(dev);
4577                 if (!domain)
4578                         return 0;
4579
4580                 dmar_remove_one_dev_info(dev);
4581                 if (!domain_type_is_vm_or_si(domain) &&
4582                     list_empty(&domain->devices))
4583                         domain_exit(domain);
4584         } else if (action == BUS_NOTIFY_ADD_DEVICE) {
4585                 if (iommu_should_identity_map(dev, 1))
4586                         domain_add_dev_info(si_domain, dev);
4587         }
4588
4589         return 0;
4590 }
4591
4592 static struct notifier_block device_nb = {
4593         .notifier_call = device_notifier,
4594 };
4595
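     /*
      * Memory hotplug notifier: extend the static identity (si) domain when
      * memory goes online, and unmap and flush the corresponding IOVA range
      * when it is taken offline again.
      */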
4596 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4597                                        unsigned long val, void *v)
4598 {
4599         struct memory_notify *mhp = v;
4600         unsigned long long start, end;
4601         unsigned long start_vpfn, last_vpfn;
4602
4603         switch (val) {
4604         case MEM_GOING_ONLINE:
4605                 start = mhp->start_pfn << PAGE_SHIFT;
4606                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4607                 if (iommu_domain_identity_map(si_domain, start, end)) {
4608                         pr_warn("Failed to build identity map for [%llx-%llx]\n",
4609                                 start, end);
4610                         return NOTIFY_BAD;
4611                 }
4612                 break;
4613
4614         case MEM_OFFLINE:
4615         case MEM_CANCEL_ONLINE:
4616                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4617                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4618                 while (start_vpfn <= last_vpfn) {
4619                         struct iova *iova;
4620                         struct dmar_drhd_unit *drhd;
4621                         struct intel_iommu *iommu;
4622                         struct page *freelist;
4623
4624                         iova = find_iova(&si_domain->iovad, start_vpfn);
4625                         if (iova == NULL) {
4626                                 pr_debug("Failed to get IOVA for PFN %lx\n",
4627                                          start_vpfn);
4628                                 break;
4629                         }
4630
4631                         iova = split_and_remove_iova(&si_domain->iovad, iova,
4632                                                      start_vpfn, last_vpfn);
4633                         if (iova == NULL) {
4634                                 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4635                                         start_vpfn, last_vpfn);
4636                                 return NOTIFY_BAD;
4637                         }
4638
4639                         freelist = domain_unmap(si_domain, iova->pfn_lo,
4640                                                iova->pfn_hi);
4641
4642                         rcu_read_lock();
4643                         for_each_active_iommu(iommu, drhd)
4644                                 iommu_flush_iotlb_psi(iommu, si_domain,
4645                                         iova->pfn_lo, iova_size(iova),
4646                                         !freelist, 0);
4647                         rcu_read_unlock();
4648                         dma_free_pagelist(freelist);
4649
4650                         start_vpfn = iova->pfn_hi + 1;
4651                         free_iova_mem(iova);
4652                 }
4653                 break;
4654         }
4655
4656         return NOTIFY_OK;
4657 }
4658
4659 static struct notifier_block intel_iommu_memory_nb = {
4660         .notifier_call = intel_iommu_memory_notifier,
4661         .priority = 0
4662 };
4663
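     /* Drop the given CPU's cached IOVAs from every active domain. */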
4664 static void free_all_cpu_cached_iovas(unsigned int cpu)
4665 {
4666         int i;
4667
4668         for (i = 0; i < g_num_of_iommus; i++) {
4669                 struct intel_iommu *iommu = g_iommus[i];
4670                 struct dmar_domain *domain;
4671                 int did;
4672
4673                 if (!iommu)
4674                         continue;
4675
4676                 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4677                         domain = get_iommu_domain(iommu, (u16)did);
4678
4679                         if (!domain)
4680                                 continue;
4681                         free_cpu_cached_iovas(cpu, &domain->iovad);
4682                 }
4683         }
4684 }
4685
4686 static int intel_iommu_cpu_dead(unsigned int cpu)
4687 {
4688         free_all_cpu_cached_iovas(cpu);
4689         return 0;
4690 }
4691
4692 static void intel_disable_iommus(void)
4693 {
4694         struct intel_iommu *iommu = NULL;
4695         struct dmar_drhd_unit *drhd;
4696
4697         for_each_iommu(iommu, drhd)
4698                 iommu_disable_translation(iommu);
4699 }
4700
4701 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4702 {
4703         struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4704
4705         return container_of(iommu_dev, struct intel_iommu, iommu);
4706 }
4707
4708 static ssize_t intel_iommu_show_version(struct device *dev,
4709                                         struct device_attribute *attr,
4710                                         char *buf)
4711 {
4712         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4713         u32 ver = readl(iommu->reg + DMAR_VER_REG);
4714         return sprintf(buf, "%d:%d\n",
4715                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4716 }
4717 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4718
4719 static ssize_t intel_iommu_show_address(struct device *dev,
4720                                         struct device_attribute *attr,
4721                                         char *buf)
4722 {
4723         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4724         return sprintf(buf, "%llx\n", iommu->reg_phys);
4725 }
4726 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4727
4728 static ssize_t intel_iommu_show_cap(struct device *dev,
4729                                     struct device_attribute *attr,
4730                                     char *buf)
4731 {
4732         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4733         return sprintf(buf, "%llx\n", iommu->cap);
4734 }
4735 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4736
4737 static ssize_t intel_iommu_show_ecap(struct device *dev,
4738                                     struct device_attribute *attr,
4739                                     char *buf)
4740 {
4741         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4742         return sprintf(buf, "%llx\n", iommu->ecap);
4743 }
4744 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4745
4746 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4747                                       struct device_attribute *attr,
4748                                       char *buf)
4749 {
4750         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4751         return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4752 }
4753 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4754
4755 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4756                                            struct device_attribute *attr,
4757                                            char *buf)
4758 {
4759         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4760         return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4761                                                   cap_ndoms(iommu->cap)));
4762 }
4763 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4764
4765 static struct attribute *intel_iommu_attrs[] = {
4766         &dev_attr_version.attr,
4767         &dev_attr_address.attr,
4768         &dev_attr_cap.attr,
4769         &dev_attr_ecap.attr,
4770         &dev_attr_domains_supported.attr,
4771         &dev_attr_domains_used.attr,
4772         NULL,
4773 };
4774
4775 static struct attribute_group intel_iommu_group = {
4776         .name = "intel-iommu",
4777         .attrs = intel_iommu_attrs,
4778 };
4779
4780 const struct attribute_group *intel_iommu_groups[] = {
4781         &intel_iommu_group,
4782         NULL,
4783 };
4784
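     /*
      * If the DMAR table opts in to DMA protection and an untrusted
      * (e.g. externally facing) PCI device is present, override no_iommu
      * and dmar_disabled so the IOMMU is forced on.
      */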
4785 static int __init platform_optin_force_iommu(void)
4786 {
4787         struct pci_dev *pdev = NULL;
4788         bool has_untrusted_dev = false;
4789
4790         if (!dmar_platform_optin() || no_platform_optin)
4791                 return 0;
4792
4793         for_each_pci_dev(pdev) {
4794                 if (pdev->untrusted) {
4795                         has_untrusted_dev = true;
4796                         break;
4797                 }
4798         }
4799
4800         if (!has_untrusted_dev)
4801                 return 0;
4802
4803         if (no_iommu || dmar_disabled)
4804                 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4805
4806         /*
4807          * If Intel-IOMMU is disabled by default, we will apply an identity
4808          * map to all devices except those marked as untrusted.
4809          */
4810         if (dmar_disabled)
4811                 iommu_identity_mapping |= IDENTMAP_ALL;
4812
4813         dmar_disabled = 0;
4814 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4815         swiotlb = 0;
4816 #endif
4817         no_iommu = 0;
4818
4819         return 1;
4820 }
4821
4822 int __init intel_iommu_init(void)
4823 {
4824         int ret = -ENODEV;
4825         struct dmar_drhd_unit *drhd;
4826         struct intel_iommu *iommu;
4827
4828         /*
4829          * Intel IOMMU is required for a TXT/tboot launch or platform
4830          * opt in, so enforce that.
4831          */
4832         force_on = tboot_force_iommu() || platform_optin_force_iommu();
4833
4834         if (iommu_init_mempool()) {
4835                 if (force_on)
4836                         panic("tboot: Failed to initialize iommu memory\n");
4837                 return -ENOMEM;
4838         }
4839
4840         down_write(&dmar_global_lock);
4841         if (dmar_table_init()) {
4842                 if (force_on)
4843                         panic("tboot: Failed to initialize DMAR table\n");
4844                 goto out_free_dmar;
4845         }
4846
4847         if (dmar_dev_scope_init() < 0) {
4848                 if (force_on)
4849                         panic("tboot: Failed to initialize DMAR device scope\n");
4850                 goto out_free_dmar;
4851         }
4852
4853         up_write(&dmar_global_lock);
4854
4855         /*
4856          * The bus notifier takes the dmar_global_lock, so lockdep will
4857          * complain later when we register it under the lock.
4858          */
4859         dmar_register_bus_notifier();
4860
4861         down_write(&dmar_global_lock);
4862
4863         if (no_iommu || dmar_disabled) {
4864                 /*
4865                  * We exit the function here to ensure the IOMMU's remapping and
4866                  * mempool aren't set up, which means the IOMMU's PMRs won't be
4867                  * disabled via the call to init_dmars(). So disable them
4868                  * explicitly here. The PMRs were set up by tboot prior to
4869                  * calling SENTER, but the kernel is expected to reset/tear
4870                  * down the PMRs.
4871                  */
4872                 if (intel_iommu_tboot_noforce) {
4873                         for_each_iommu(iommu, drhd)
4874                                 iommu_disable_protect_mem_regions(iommu);
4875                 }
4876
4877                 /*
4878                  * Make sure the IOMMUs are switched off, even when we
4879                  * boot into a kexec kernel and the previous kernel left
4880                  * them enabled
4881                  */
4882                 intel_disable_iommus();
4883                 goto out_free_dmar;
4884         }
4885
4886         if (list_empty(&dmar_rmrr_units))
4887                 pr_info("No RMRR found\n");
4888
4889         if (list_empty(&dmar_atsr_units))
4890                 pr_info("No ATSR found\n");
4891
4892         if (dmar_init_reserved_ranges()) {
4893                 if (force_on)
4894                         panic("tboot: Failed to reserve iommu ranges\n");
4895                 goto out_free_reserved_range;
4896         }
4897
4898         if (dmar_map_gfx)
4899                 intel_iommu_gfx_mapped = 1;
4900
4901         init_no_remapping_devices();
4902
4903         ret = init_dmars();
4904         if (ret) {
4905                 if (force_on)
4906                         panic("tboot: Failed to initialize DMARs\n");
4907                 pr_err("Initialization failed\n");
4908                 goto out_free_reserved_range;
4909         }
4910         up_write(&dmar_global_lock);
4911         pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4912
4913 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4914         swiotlb = 0;
4915 #endif
4916         dma_ops = &intel_dma_ops;
4917
4918         init_iommu_pm_ops();
4919
4920         for_each_active_iommu(iommu, drhd) {
4921                 iommu_device_sysfs_add(&iommu->iommu, NULL,
4922                                        intel_iommu_groups,
4923                                        "%s", iommu->name);
4924                 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4925                 iommu_device_register(&iommu->iommu);
4926         }
4927
4928         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4929         bus_register_notifier(&pci_bus_type, &device_nb);
4930         if (si_domain && !hw_pass_through)
4931                 register_memory_notifier(&intel_iommu_memory_nb);
4932         cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4933                           intel_iommu_cpu_dead);
4934         intel_iommu_enabled = 1;
4935         intel_iommu_debugfs_init();
4936
4937         return 0;
4938
4939 out_free_reserved_range:
4940         put_iova_domain(&reserved_iova_list);
4941 out_free_dmar:
4942         intel_iommu_free_dmars();
4943         up_write(&dmar_global_lock);
4944         iommu_exit_mempool();
4945         return ret;
4946 }
4947
4948 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4949 {
4950         struct intel_iommu *iommu = opaque;
4951
4952         domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4953         return 0;
4954 }
4955
4956 /*
4957  * NB - intel-iommu lacks any sort of reference counting for the users of
4958  * dependent devices.  If multiple endpoints have intersecting dependent
4959  * devices, unbinding the driver from any one of them will possibly leave
4960  * the others unable to operate.
4961  */
4962 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4963 {
4964         if (!iommu || !dev || !dev_is_pci(dev))
4965                 return;
4966
4967         pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4968 }
4969
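     /*
      * Tear down one device's attachment: clear its PASID entry (when in
      * scalable mode) and its context entries, disable the device-TLB and
      * detach the device from its domain. Caller must hold
      * device_domain_lock.
      */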
4970 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4971 {
4972         struct intel_iommu *iommu;
4973         unsigned long flags;
4974
4975         assert_spin_locked(&device_domain_lock);
4976
4977         if (WARN_ON(!info))
4978                 return;
4979
4980         iommu = info->iommu;
4981
4982         if (info->dev) {
4983                 if (dev_is_pci(info->dev) && sm_supported(iommu))
4984                         intel_pasid_tear_down_entry(iommu, info->dev,
4985                                         PASID_RID2PASID);
4986
4987                 iommu_disable_dev_iotlb(info);
4988                 domain_context_clear(iommu, info->dev);
4989                 intel_pasid_free_table(info->dev);
4990         }
4991
4992         unlink_domain_info(info);
4993
4994         spin_lock_irqsave(&iommu->lock, flags);
4995         domain_detach_iommu(info->domain, iommu);
4996         spin_unlock_irqrestore(&iommu->lock, flags);
4997
4998         free_devinfo_mem(info);
4999 }
5000
5001 static void dmar_remove_one_dev_info(struct device *dev)
5002 {
5003         struct device_domain_info *info;
5004         unsigned long flags;
5005
5006         spin_lock_irqsave(&device_domain_lock, flags);
5007         info = dev->archdata.iommu;
5008         __dmar_remove_one_dev_info(info);
5009         spin_unlock_irqrestore(&device_domain_lock, flags);
5010 }
5011
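     /*
      * Minimal domain init for domains created through the IOMMU API: set up
      * the IOVA allocator, derive the AGAW from @guest_width and allocate
      * the top-level page directory.
      */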
5012 static int md_domain_init(struct dmar_domain *domain, int guest_width)
5013 {
5014         int adjust_width;
5015
5016         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
5017         domain_reserve_special_ranges(domain);
5018
5019         /* calculate AGAW */
5020         domain->gaw = guest_width;
5021         adjust_width = guestwidth_to_adjustwidth(guest_width);
5022         domain->agaw = width_to_agaw(adjust_width);
5023
5024         domain->iommu_coherency = 0;
5025         domain->iommu_snooping = 0;
5026         domain->iommu_superpage = 0;
5027         domain->max_addr = 0;
5028
5029         /* always allocate the top pgd */
5030         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
5031         if (!domain->pgd)
5032                 return -ENOMEM;
5033         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
5034         return 0;
5035 }
5036
5037 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
5038 {
5039         struct dmar_domain *dmar_domain;
5040         struct iommu_domain *domain;
5041
5042         if (type != IOMMU_DOMAIN_UNMANAGED)
5043                 return NULL;
5044
5045         dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
5046         if (!dmar_domain) {
5047                 pr_err("Can't allocate dmar_domain\n");
5048                 return NULL;
5049         }
5050         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
5051                 pr_err("Domain initialization failed\n");
5052                 domain_exit(dmar_domain);
5053                 return NULL;
5054         }
5055         domain_update_iommu_cap(dmar_domain);
5056
5057         domain = &dmar_domain->domain;
5058         domain->geometry.aperture_start = 0;
5059         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
5060         domain->geometry.force_aperture = true;
5061
5062         return domain;
5063 }
5064
5065 static void intel_iommu_domain_free(struct iommu_domain *domain)
5066 {
5067         domain_exit(to_dmar_domain(domain));
5068 }
5069
5070 /*
5071  * Check whether the @domain could be attached to @dev through the
5072  * aux-domain attach/detach APIs.
5073  */
5074 static inline bool
5075 is_aux_domain(struct device *dev, struct iommu_domain *domain)
5076 {
5077         struct device_domain_info *info = dev->archdata.iommu;
5078
5079         return info && info->auxd_enabled &&
5080                         domain->type == IOMMU_DOMAIN_UNMANAGED;
5081 }
5082
5083 static void auxiliary_link_device(struct dmar_domain *domain,
5084                                   struct device *dev)
5085 {
5086         struct device_domain_info *info = dev->archdata.iommu;
5087
5088         assert_spin_locked(&device_domain_lock);
5089         if (WARN_ON(!info))
5090                 return;
5091
5092         domain->auxd_refcnt++;
5093         list_add(&domain->auxd, &info->auxiliary_domains);
5094 }
5095
5096 static void auxiliary_unlink_device(struct dmar_domain *domain,
5097                                     struct device *dev)
5098 {
5099         struct device_domain_info *info = dev->archdata.iommu;
5100
5101         assert_spin_locked(&device_domain_lock);
5102         if (WARN_ON(!info))
5103                 return;
5104
5105         list_del(&domain->auxd);
5106         domain->auxd_refcnt--;
5107
5108         if (!domain->auxd_refcnt && domain->default_pasid > 0)
5109                 intel_pasid_free_id(domain->default_pasid);
5110 }
5111
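     /*
      * Attach @domain to @dev as an auxiliary domain: allocate a default
      * PASID for the domain if it doesn't have one yet, then install a
      * second-level PASID table entry for it on @dev.
      */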
5112 static int aux_domain_add_dev(struct dmar_domain *domain,
5113                               struct device *dev)
5114 {
5115         int ret;
5116         u8 bus, devfn;
5117         unsigned long flags;
5118         struct intel_iommu *iommu;
5119
5120         iommu = device_to_iommu(dev, &bus, &devfn);
5121         if (!iommu)
5122                 return -ENODEV;
5123
5124         if (domain->default_pasid <= 0) {
5125                 int pasid;
5126
5127                 pasid = intel_pasid_alloc_id(domain, PASID_MIN,
5128                                              pci_max_pasids(to_pci_dev(dev)),
5129                                              GFP_KERNEL);
5130                 if (pasid <= 0) {
5131                         pr_err("Can't allocate default pasid\n");
5132                         return -ENODEV;
5133                 }
5134                 domain->default_pasid = pasid;
5135         }
5136
5137         spin_lock_irqsave(&device_domain_lock, flags);
5138         /*
5139          * iommu->lock must be held to attach the domain to the iommu and to
5140          * set up the PASID entry for second-level translation.
5141          */
5142         spin_lock(&iommu->lock);
5143         ret = domain_attach_iommu(domain, iommu);
5144         if (ret)
5145                 goto attach_failed;
5146
5147         /* Set up the PASID entry for mediated devices: */
5148         ret = intel_pasid_setup_second_level(iommu, domain, dev,
5149                                              domain->default_pasid);
5150         if (ret)
5151                 goto table_failed;
5152         spin_unlock(&iommu->lock);
5153
5154         auxiliary_link_device(domain, dev);
5155
5156         spin_unlock_irqrestore(&device_domain_lock, flags);
5157
5158         return 0;
5159
5160 table_failed:
5161         domain_detach_iommu(domain, iommu);
5162 attach_failed:
5163         spin_unlock(&iommu->lock);
5164         spin_unlock_irqrestore(&device_domain_lock, flags);
5165         if (!domain->auxd_refcnt && domain->default_pasid > 0)
5166                 intel_pasid_free_id(domain->default_pasid);
5167
5168         return ret;
5169 }
5170
5171 static void aux_domain_remove_dev(struct dmar_domain *domain,
5172                                   struct device *dev)
5173 {
5174         struct device_domain_info *info;
5175         struct intel_iommu *iommu;
5176         unsigned long flags;
5177
5178         if (!is_aux_domain(dev, &domain->domain))
5179                 return;
5180
5181         spin_lock_irqsave(&device_domain_lock, flags);
5182         info = dev->archdata.iommu;
5183         iommu = info->iommu;
5184
5185         auxiliary_unlink_device(domain, dev);
5186
5187         spin_lock(&iommu->lock);
5188         intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
5189         domain_detach_iommu(domain, iommu);
5190         spin_unlock(&iommu->lock);
5191
5192         spin_unlock_irqrestore(&device_domain_lock, flags);
5193 }
5194
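     /*
      * Make sure @domain fits the IOMMU that @dev sits behind: cap the
      * address width to what the hardware supports and drop unused upper
      * page-table levels so the domain's AGAW does not exceed the IOMMU's.
      */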
5195 static int prepare_domain_attach_device(struct iommu_domain *domain,
5196                                         struct device *dev)
5197 {
5198         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5199         struct intel_iommu *iommu;
5200         int addr_width;
5201         u8 bus, devfn;
5202
5203         iommu = device_to_iommu(dev, &bus, &devfn);
5204         if (!iommu)
5205                 return -ENODEV;
5206
5207         /* check if this iommu agaw is sufficient for max mapped address */
5208         addr_width = agaw_to_width(iommu->agaw);
5209         if (addr_width > cap_mgaw(iommu->cap))
5210                 addr_width = cap_mgaw(iommu->cap);
5211
5212         if (dmar_domain->max_addr > (1LL << addr_width)) {
5213                 dev_err(dev, "%s: iommu width (%d) is not "
5214                         "sufficient for the mapped address (%llx)\n",
5215                         __func__, addr_width, dmar_domain->max_addr);
5216                 return -EFAULT;
5217         }
5218         dmar_domain->gaw = addr_width;
5219
5220         /*
5221          * Knock out extra levels of page tables if necessary
5222          */
5223         while (iommu->agaw < dmar_domain->agaw) {
5224                 struct dma_pte *pte;
5225
5226                 pte = dmar_domain->pgd;
5227                 if (dma_pte_present(pte)) {
5228                         dmar_domain->pgd = (struct dma_pte *)
5229                                 phys_to_virt(dma_pte_addr(pte));
5230                         free_pgtable_page(pte);
5231                 }
5232                 dmar_domain->agaw--;
5233         }
5234
5235         return 0;
5236 }
5237
5238 static int intel_iommu_attach_device(struct iommu_domain *domain,
5239                                      struct device *dev)
5240 {
5241         int ret;
5242
5243         if (device_is_rmrr_locked(dev)) {
5244                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
5245                 return -EPERM;
5246         }
5247
5248         if (is_aux_domain(dev, domain))
5249                 return -EPERM;
5250
5251         /* normally dev is not mapped */
5252         if (unlikely(domain_context_mapped(dev))) {
5253                 struct dmar_domain *old_domain;
5254
5255                 old_domain = find_domain(dev);
5256                 if (old_domain) {
5257                         rcu_read_lock();
5258                         dmar_remove_one_dev_info(dev);
5259                         rcu_read_unlock();
5260
5261                         if (!domain_type_is_vm_or_si(old_domain) &&
5262                             list_empty(&old_domain->devices))
5263                                 domain_exit(old_domain);
5264                 }
5265         }
5266
5267         ret = prepare_domain_attach_device(domain, dev);
5268         if (ret)
5269                 return ret;
5270
5271         return domain_add_dev_info(to_dmar_domain(domain), dev);
5272 }
5273
5274 static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
5275                                          struct device *dev)
5276 {
5277         int ret;
5278
5279         if (!is_aux_domain(dev, domain))
5280                 return -EPERM;
5281
5282         ret = prepare_domain_attach_device(domain, dev);
5283         if (ret)
5284                 return ret;
5285
5286         return aux_domain_add_dev(to_dmar_domain(domain), dev);
5287 }
5288
5289 static void intel_iommu_detach_device(struct iommu_domain *domain,
5290                                       struct device *dev)
5291 {
5292         dmar_remove_one_dev_info(dev);
5293 }
5294
5295 static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
5296                                           struct device *dev)
5297 {
5298         aux_domain_remove_dev(to_dmar_domain(domain), dev);
5299 }
5300
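     /*
      * IOMMU API map callback: translate IOMMU_READ/WRITE/CACHE into
      * DMA_PTE_* bits, update the domain's max_addr bookkeeping and install
      * the mapping via domain_pfn_mapping().
      */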
5301 static int intel_iommu_map(struct iommu_domain *domain,
5302                            unsigned long iova, phys_addr_t hpa,
5303                            size_t size, int iommu_prot)
5304 {
5305         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5306         u64 max_addr;
5307         int prot = 0;
5308         int ret;
5309
5310         if (iommu_prot & IOMMU_READ)
5311                 prot |= DMA_PTE_READ;
5312         if (iommu_prot & IOMMU_WRITE)
5313                 prot |= DMA_PTE_WRITE;
5314         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5315                 prot |= DMA_PTE_SNP;
5316
5317         max_addr = iova + size;
5318         if (dmar_domain->max_addr < max_addr) {
5319                 u64 end;
5320
5321                 /* check if minimum agaw is sufficient for mapped address */
5322                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5323                 if (end < max_addr) {
5324                         pr_err("%s: iommu width (%d) is not "
5325                                "sufficient for the mapped address (%llx)\n",
5326                                __func__, dmar_domain->gaw, max_addr);
5327                         return -EFAULT;
5328                 }
5329                 dmar_domain->max_addr = max_addr;
5330         }
5331         /* Round up size to next multiple of PAGE_SIZE, if it and
5332            the low bits of hpa would take us onto the next page */
5333         size = aligned_nrpages(hpa, size);
5334         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5335                                  hpa >> VTD_PAGE_SHIFT, size, prot);
5336         return ret;
5337 }
5338
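     /*
      * IOMMU API unmap callback: clear the page-table range, flush the
      * IOTLB on every IOMMU the domain is attached to, then free the
      * page-table pages collected in the freelist.
      */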
5339 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5340                                 unsigned long iova, size_t size)
5341 {
5342         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5343         struct page *freelist = NULL;
5344         unsigned long start_pfn, last_pfn;
5345         unsigned int npages;
5346         int iommu_id, level = 0;
5347
5348         /* Cope with horrid API which requires us to unmap more than the
5349            size argument if it happens to be a large-page mapping. */
5350         BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5351
5352         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5353                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5354
5355         start_pfn = iova >> VTD_PAGE_SHIFT;
5356         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5357
5358         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5359
5360         npages = last_pfn - start_pfn + 1;
5361
5362         for_each_domain_iommu(iommu_id, dmar_domain)
5363                 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5364                                       start_pfn, npages, !freelist, 0);
5365
5366         dma_free_pagelist(freelist);
5367
5368         if (dmar_domain->max_addr == iova + size)
5369                 dmar_domain->max_addr = iova;
5370
5371         return size;
5372 }
5373
5374 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5375                                             dma_addr_t iova)
5376 {
5377         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5378         struct dma_pte *pte;
5379         int level = 0;
5380         u64 phys = 0;
5381
5382         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5383         if (pte)
5384                 phys = dma_pte_addr(pte);
5385
5386         return phys;
5387 }
5388
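     /* True only if every active IOMMU advertises scalable mode. */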
5389 static inline bool scalable_mode_support(void)
5390 {
5391         struct dmar_drhd_unit *drhd;
5392         struct intel_iommu *iommu;
5393         bool ret = true;
5394
5395         rcu_read_lock();
5396         for_each_active_iommu(iommu, drhd) {
5397                 if (!sm_supported(iommu)) {
5398                         ret = false;
5399                         break;
5400                 }
5401         }
5402         rcu_read_unlock();
5403
5404         return ret;
5405 }
5406
5407 static inline bool iommu_pasid_support(void)
5408 {
5409         struct dmar_drhd_unit *drhd;
5410         struct intel_iommu *iommu;
5411         bool ret = true;
5412
5413         rcu_read_lock();
5414         for_each_active_iommu(iommu, drhd) {
5415                 if (!pasid_supported(iommu)) {
5416                         ret = false;
5417                         break;
5418                 }
5419         }
5420         rcu_read_unlock();
5421
5422         return ret;
5423 }
5424
5425 static bool intel_iommu_capable(enum iommu_cap cap)
5426 {
5427         if (cap == IOMMU_CAP_CACHE_COHERENCY)
5428                 return domain_update_iommu_snooping(NULL) == 1;
5429         if (cap == IOMMU_CAP_INTR_REMAP)
5430                 return irq_remapping_enabled == 1;
5431
5432         return false;
5433 }
5434
5435 static int intel_iommu_add_device(struct device *dev)
5436 {
5437         struct intel_iommu *iommu;
5438         struct iommu_group *group;
5439         u8 bus, devfn;
5440
5441         iommu = device_to_iommu(dev, &bus, &devfn);
5442         if (!iommu)
5443                 return -ENODEV;
5444
5445         iommu_device_link(&iommu->iommu, dev);
5446
5447         group = iommu_group_get_for_dev(dev);
5448
5449         if (IS_ERR(group))
5450                 return PTR_ERR(group);
5451
5452         iommu_group_put(group);
5453         return 0;
5454 }
5455
5456 static void intel_iommu_remove_device(struct device *dev)
5457 {
5458         struct intel_iommu *iommu;
5459         u8 bus, devfn;
5460
5461         iommu = device_to_iommu(dev, &bus, &devfn);
5462         if (!iommu)
5463                 return;
5464
5465         iommu_group_remove_device(dev);
5466
5467         iommu_device_unlink(&iommu->iommu, dev);
5468 }
5469
5470 static void intel_iommu_get_resv_regions(struct device *device,
5471                                          struct list_head *head)
5472 {
5473         struct iommu_resv_region *reg;
5474         struct dmar_rmrr_unit *rmrr;
5475         struct device *i_dev;
5476         int i;
5477
5478         rcu_read_lock();
5479         for_each_rmrr_units(rmrr) {
5480                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5481                                           i, i_dev) {
5482                         if (i_dev != device)
5483                                 continue;
5484
5485                         list_add_tail(&rmrr->resv->list, head);
5486                 }
5487         }
5488         rcu_read_unlock();
5489
5490         reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5491                                       IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5492                                       0, IOMMU_RESV_MSI);
5493         if (!reg)
5494                 return;
5495         list_add_tail(&reg->list, head);
5496 }
5497
5498 static void intel_iommu_put_resv_regions(struct device *dev,
5499                                          struct list_head *head)
5500 {
5501         struct iommu_resv_region *entry, *next;
5502
5503         list_for_each_entry_safe(entry, next, head, list) {
5504                 if (entry->type == IOMMU_RESV_MSI)
5505                         kfree(entry);
5506         }
5507 }
5508
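     /*
      * Enable PASID support for @dev: set the PASID-enable bit in its
      * context entry (flushing the context cache if needed) and enable the
      * device's PCIe PASID/ATS capabilities if they aren't on already.
      */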
5509 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
5510 {
5511         struct device_domain_info *info;
5512         struct context_entry *context;
5513         struct dmar_domain *domain;
5514         unsigned long flags;
5515         u64 ctx_lo;
5516         int ret;
5517
5518         domain = get_valid_domain_for_dev(dev);
5519         if (!domain)
5520                 return -EINVAL;
5521
5522         spin_lock_irqsave(&device_domain_lock, flags);
5523         spin_lock(&iommu->lock);
5524
5525         ret = -EINVAL;
5526         info = dev->archdata.iommu;
5527         if (!info || !info->pasid_supported)
5528                 goto out;
5529
5530         context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5531         if (WARN_ON(!context))
5532                 goto out;
5533
5534         ctx_lo = context[0].lo;
5535
5536         if (!(ctx_lo & CONTEXT_PASIDE)) {
5537                 ctx_lo |= CONTEXT_PASIDE;
5538                 context[0].lo = ctx_lo;
5539                 wmb();
5540                 iommu->flush.flush_context(iommu,
5541                                            domain->iommu_did[iommu->seq_id],
5542                                            PCI_DEVID(info->bus, info->devfn),
5543                                            DMA_CCMD_MASK_NOBIT,
5544                                            DMA_CCMD_DEVICE_INVL);
5545         }
5546
5547         /* Enable PASID support in the device, if it wasn't already */
5548         if (!info->pasid_enabled)
5549                 iommu_enable_dev_iotlb(info);
5550
5551         ret = 0;
5552
5553  out:
5554         spin_unlock(&iommu->lock);
5555         spin_unlock_irqrestore(&device_domain_lock, flags);
5556
5557         return ret;
5558 }
5559
5560 #ifdef CONFIG_INTEL_IOMMU_SVM
5561 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5562 {
5563         struct intel_iommu *iommu;
5564         u8 bus, devfn;
5565
5566         if (iommu_dummy(dev)) {
5567                 dev_warn(dev,
5568                          "No IOMMU translation for device; cannot enable SVM\n");
5569                 return NULL;
5570         }
5571
5572         iommu = device_to_iommu(dev, &bus, &devfn);
5573         if (!iommu) {
5574                 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
5575                 return NULL;
5576         }
5577
5578         return iommu;
5579 }
5580 #endif /* CONFIG_INTEL_IOMMU_SVM */
5581
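     /*
      * Allow @dev to use auxiliary domains: requires scalable mode and
      * PASID support on its IOMMU, and enables PASID on the device first.
      */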
5582 static int intel_iommu_enable_auxd(struct device *dev)
5583 {
5584         struct device_domain_info *info;
5585         struct intel_iommu *iommu;
5586         unsigned long flags;
5587         u8 bus, devfn;
5588         int ret;
5589
5590         iommu = device_to_iommu(dev, &bus, &devfn);
5591         if (!iommu || dmar_disabled)
5592                 return -EINVAL;
5593
5594         if (!sm_supported(iommu) || !pasid_supported(iommu))
5595                 return -EINVAL;
5596
5597         ret = intel_iommu_enable_pasid(iommu, dev);
5598         if (ret)
5599                 return -ENODEV;
5600
5601         spin_lock_irqsave(&device_domain_lock, flags);
5602         info = dev->archdata.iommu;
5603         info->auxd_enabled = 1;
5604         spin_unlock_irqrestore(&device_domain_lock, flags);
5605
5606         return 0;
5607 }
5608
5609 static int intel_iommu_disable_auxd(struct device *dev)
5610 {
5611         struct device_domain_info *info;
5612         unsigned long flags;
5613
5614         spin_lock_irqsave(&device_domain_lock, flags);
5615         info = dev->archdata.iommu;
5616         if (!WARN_ON(!info))
5617                 info->auxd_enabled = 0;
5618         spin_unlock_irqrestore(&device_domain_lock, flags);
5619
5620         return 0;
5621 }
5622
5623 /*
5624  * A PCI Express Designated Vendor-Specific Extended Capability (DVSEC) is
5625  * defined in section 3.7 of the Intel Scalable I/O Virtualization technical
5626  * spec so that system software and tools can detect endpoint devices that
5627  * support Intel Scalable I/O Virtualization without a host driver dependency.
5628  *
5629  * Returns the address of the matching extended capability structure within
5630  * the device's PCI configuration space or 0 if the device does not support
5631  * it.
5632  */
5633 static int siov_find_pci_dvsec(struct pci_dev *pdev)
5634 {
5635         int pos;
5636         u16 vendor, id;
5637
5638         pos = pci_find_next_ext_capability(pdev, 0, 0x23);
5639         while (pos) {
5640                 pci_read_config_word(pdev, pos + 4, &vendor);
5641                 pci_read_config_word(pdev, pos + 8, &id);
5642                 if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
5643                         return pos;
5644
5645                 pos = pci_find_next_ext_capability(pdev, pos, 0x23);
5646         }
5647
5648         return 0;
5649 }
5650
5651 static bool
5652 intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
5653 {
5654         if (feat == IOMMU_DEV_FEAT_AUX) {
5655                 int ret;
5656
5657                 if (!dev_is_pci(dev) || dmar_disabled ||
5658                     !scalable_mode_support() || !iommu_pasid_support())
5659                         return false;
5660
5661                 ret = pci_pasid_features(to_pci_dev(dev));
5662                 if (ret < 0)
5663                         return false;
5664
5665                 return !!siov_find_pci_dvsec(to_pci_dev(dev));
5666         }
5667
5668         return false;
5669 }
5670
5671 static int
5672 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
5673 {
5674         if (feat == IOMMU_DEV_FEAT_AUX)
5675                 return intel_iommu_enable_auxd(dev);
5676
5677         return -ENODEV;
5678 }
5679
5680 static int
5681 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
5682 {
5683         if (feat == IOMMU_DEV_FEAT_AUX)
5684                 return intel_iommu_disable_auxd(dev);
5685
5686         return -ENODEV;
5687 }
5688
5689 static bool
5690 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
5691 {
5692         struct device_domain_info *info = dev->archdata.iommu;
5693
5694         if (feat == IOMMU_DEV_FEAT_AUX)
5695                 return scalable_mode_support() && info && info->auxd_enabled;
5696
5697         return false;
5698 }
5699
5700 static int
5701 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
5702 {
5703         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5704
5705         return dmar_domain->default_pasid > 0 ?
5706                         dmar_domain->default_pasid : -EINVAL;
5707 }
5708
5709 const struct iommu_ops intel_iommu_ops = {
5710         .capable                = intel_iommu_capable,
5711         .domain_alloc           = intel_iommu_domain_alloc,
5712         .domain_free            = intel_iommu_domain_free,
5713         .attach_dev             = intel_iommu_attach_device,
5714         .detach_dev             = intel_iommu_detach_device,
5715         .aux_attach_dev         = intel_iommu_aux_attach_device,
5716         .aux_detach_dev         = intel_iommu_aux_detach_device,
5717         .aux_get_pasid          = intel_iommu_aux_get_pasid,
5718         .map                    = intel_iommu_map,
5719         .unmap                  = intel_iommu_unmap,
5720         .iova_to_phys           = intel_iommu_iova_to_phys,
5721         .add_device             = intel_iommu_add_device,
5722         .remove_device          = intel_iommu_remove_device,
5723         .get_resv_regions       = intel_iommu_get_resv_regions,
5724         .put_resv_regions       = intel_iommu_put_resv_regions,
5725         .device_group           = pci_device_group,
5726         .dev_has_feat           = intel_iommu_dev_has_feat,
5727         .dev_feat_enabled       = intel_iommu_dev_feat_enabled,
5728         .dev_enable_feat        = intel_iommu_dev_enable_feat,
5729         .dev_disable_feat       = intel_iommu_dev_disable_feat,
5730         .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
5731 };
5732
5733 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5734 {
5735         /* G4x/GM45 integrated gfx dmar support is totally busted. */
5736         pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
5737         dmar_map_gfx = 0;
5738 }
5739
5740 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5741 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5742 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5743 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5744 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5745 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5746 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5747
5748 static void quirk_iommu_rwbf(struct pci_dev *dev)
5749 {
5750         /*
5751          * Mobile 4 Series Chipset neglects to set RWBF capability,
5752          * but needs it. Same seems to hold for the desktop versions.
5753          */
5754         pci_info(dev, "Forcing write-buffer flush capability\n");
5755         rwbf_quirk = 1;
5756 }
5757
5758 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5759 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5760 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5761 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5762 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5763 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5764 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5765
5766 #define GGC 0x52
5767 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
5768 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
5769 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
5770 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
5771 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
5772 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
5773 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
5774 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
5775
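     /*
      * Ironlake/Calpella graphics: if the BIOS allocated no VT-d shadow GTT
      * (GGC_MEMORY_VT_ENABLED clear), disable the IOMMU for graphics;
      * otherwise fall back to strict, unbatched IOTLB flushing.
      */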
5776 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5777 {
5778         unsigned short ggc;
5779
5780         if (pci_read_config_word(dev, GGC, &ggc))
5781                 return;
5782
5783         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5784                 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5785                 dmar_map_gfx = 0;
5786         } else if (dmar_map_gfx) {
5787                 /* we have to ensure the gfx device is idle before we flush */
5788                 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
5789                 intel_iommu_strict = 1;
5790         }
5791 }
5792 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5793 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5794 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5795 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5796
5797 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5798    ISOCH DMAR unit for the Azalia sound device, but not give it any
5799    TLB entries, which causes it to deadlock. Check for that.  We do
5800    this in a function called from init_dmars(), instead of in a PCI
5801    quirk, because we don't want to print the obnoxious "BIOS broken"
5802    message if VT-d is actually disabled.
5803 */
5804 static void __init check_tylersburg_isoch(void)
5805 {
5806         struct pci_dev *pdev;
5807         uint32_t vtisochctrl;
5808
5809         /* If there's no Azalia in the system anyway, forget it. */
5810         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5811         if (!pdev)
5812                 return;
5813         pci_dev_put(pdev);
5814
5815         /* System Management Registers. Might be hidden, in which case
5816            we can't do the sanity check. But that's OK, because the
5817            known-broken BIOSes _don't_ actually hide it, so far. */
5818         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5819         if (!pdev)
5820                 return;
5821
5822         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5823                 pci_dev_put(pdev);
5824                 return;
5825         }
5826
5827         pci_dev_put(pdev);
5828
5829         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5830         if (vtisochctrl & 1)
5831                 return;
5832
5833         /* Drop all bits other than the number of TLB entries */
5834         vtisochctrl &= 0x1c;
5835
5836         /* If we have the recommended number of TLB entries (16), fine. */
5837         if (vtisochctrl == 0x10)
5838                 return;
5839
5840         /* Zero TLB entries? You get to ride the short bus to school. */
5841         if (!vtisochctrl) {
5842                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5843                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5844                      dmi_get_system_info(DMI_BIOS_VENDOR),
5845                      dmi_get_system_info(DMI_BIOS_VERSION),
5846                      dmi_get_system_info(DMI_PRODUCT_VERSION));
5847                 iommu_identity_mapping |= IDENTMAP_AZALIA;
5848                 return;
5849         }
5850
5851         pr_warn("Recommended number of TLB entries for ISOCH unit is 16; your BIOS set %d\n",
5852                 vtisochctrl);
5853 }