iommu/vt-d: Make get_valid_domain_for_dev() take struct device
drivers/iommu/intel-iommu.c
1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  */
19
20 #include <linux/init.h>
21 #include <linux/bitmap.h>
22 #include <linux/debugfs.h>
23 #include <linux/export.h>
24 #include <linux/slab.h>
25 #include <linux/irq.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/pci.h>
29 #include <linux/dmar.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/mempool.h>
32 #include <linux/memory.h>
33 #include <linux/timer.h>
34 #include <linux/iova.h>
35 #include <linux/iommu.h>
36 #include <linux/intel-iommu.h>
37 #include <linux/syscore_ops.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/pci-ats.h>
41 #include <linux/memblock.h>
42 #include <asm/irq_remapping.h>
43 #include <asm/cacheflush.h>
44 #include <asm/iommu.h>
45
46 #include "irq_remapping.h"
47 #include "pci.h"
48
49 #define ROOT_SIZE               VTD_PAGE_SIZE
50 #define CONTEXT_SIZE            VTD_PAGE_SIZE
51
52 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
54 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
55
56 #define IOAPIC_RANGE_START      (0xfee00000)
57 #define IOAPIC_RANGE_END        (0xfeefffff)
58 #define IOVA_START_ADDR         (0x1000)
59
60 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
61
62 #define MAX_AGAW_WIDTH 64
63 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
64
65 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
66 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
67
68 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
69    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
70 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
71                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
72 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
73
74 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
75 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
76 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
77
78 /* page table handling */
79 #define LEVEL_STRIDE            (9)
80 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
81
82 /*
83  * This bitmap is used to advertise the page sizes our hardware supports
84  * to the IOMMU core, which will then use this information to split
85  * physically contiguous memory regions it is mapping into page sizes
86  * that we support.
87  *
88  * Traditionally the IOMMU core just handed us the mappings directly,
89  * after making sure the size is a power-of-two number of 4KiB pages and that the
90  * mapping has natural alignment.
91  *
92  * To retain this behavior, we currently advertise that we support
93  * all page sizes that are a power-of-two multiple of 4KiB.
94  *
95  * If at some point we'd like to utilize the IOMMU core's new behavior,
96  * we could change this to advertise the real page sizes we support.
97  */
98 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
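/*
 * Worked example (illustrative note): in the pgsize bitmap each set bit n
 * advertises support for a 2^n byte page.  ~0xFFFUL has every bit from
 * bit 12 upwards set, i.e. 4KiB, 8KiB, 16KiB, ... are all advertised,
 * matching the "any power-of-two multiple of 4KiB" behaviour described
 * above rather than the hardware's real 2MiB/1GiB superpage capability.
 */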
99
100 static inline int agaw_to_level(int agaw)
101 {
102         return agaw + 2;
103 }
104
105 static inline int agaw_to_width(int agaw)
106 {
107         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
108 }
109
110 static inline int width_to_agaw(int width)
111 {
112         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
113 }
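/*
 * Worked example (illustrative): with the default 48-bit address width,
 * width_to_agaw(48) = DIV_ROUND_UP(48 - 30, 9) = 2 and agaw_to_level(2) = 4,
 * i.e. a four-level page table.  A 39-bit width gives agaw 1 (three levels),
 * and the 30-bit minimum gives agaw 0 (two levels), matching the
 * "0 is level 2 30-bit" note on dmar_domain->agaw below.
 */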
114
115 static inline unsigned int level_to_offset_bits(int level)
116 {
117         return (level - 1) * LEVEL_STRIDE;
118 }
119
120 static inline int pfn_level_offset(unsigned long pfn, int level)
121 {
122         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
123 }
124
125 static inline unsigned long level_mask(int level)
126 {
127         return -1UL << level_to_offset_bits(level);
128 }
129
130 static inline unsigned long level_size(int level)
131 {
132         return 1UL << level_to_offset_bits(level);
133 }
134
135 static inline unsigned long align_to_level(unsigned long pfn, int level)
136 {
137         return (pfn + level_size(level) - 1) & level_mask(level);
138 }
139
140 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
141 {
142         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
143 }
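/*
 * Worked example (illustrative): a PTE at level 1 maps a single 4KiB page,
 * so lvl_to_nr_pages(1) = 1 << 0 = 1; at level 2 (a 2MiB superpage) it is
 * 1 << 9 = 512 pages, and at level 3 (1GiB) 1 << 18 = 262144 pages.
 */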
144
145 /* VT-d pages must never be _larger_ than MM pages. Otherwise things
146    are never going to work. */
147 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
148 {
149         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
150 }
151
152 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
153 {
154         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
155 }
156 static inline unsigned long page_to_dma_pfn(struct page *pg)
157 {
158         return mm_to_dma_pfn(page_to_pfn(pg));
159 }
160 static inline unsigned long virt_to_dma_pfn(void *p)
161 {
162         return page_to_dma_pfn(virt_to_page(p));
163 }
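/*
 * Note (assuming the usual x86 configuration): PAGE_SHIFT and VTD_PAGE_SHIFT
 * are both 12, so dma_to_mm_pfn()/mm_to_dma_pfn() shift by zero and the two
 * PFN spaces coincide.  The helpers exist to keep the distinction between
 * CPU page frames and VT-d page frames explicit at the call sites.
 */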
164
165 /* global iommu list, set NULL for ignored DMAR units */
166 static struct intel_iommu **g_iommus;
167
168 static void __init check_tylersburg_isoch(void);
169 static int rwbf_quirk;
170
171 /*
172  * set to 1 to panic the kernel if VT-d can't be enabled successfully
173  * (used when kernel is launched w/ TXT)
174  */
175 static int force_on = 0;
176
177 /*
178  * 0: Present
179  * 1-11: Reserved
180  * 12-63: Context Ptr (12 - (haw-1))
181  * 64-127: Reserved
182  */
183 struct root_entry {
184         u64     val;
185         u64     rsvd1;
186 };
187 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
188 static inline bool root_present(struct root_entry *root)
189 {
190         return (root->val & 1);
191 }
192 static inline void set_root_present(struct root_entry *root)
193 {
194         root->val |= 1;
195 }
196 static inline void set_root_value(struct root_entry *root, unsigned long value)
197 {
198         root->val |= value & VTD_PAGE_MASK;
199 }
200
201 static inline struct context_entry *
202 get_context_addr_from_root(struct root_entry *root)
203 {
204         return (struct context_entry *)
205                 (root_present(root)?phys_to_virt(
206                 root->val & VTD_PAGE_MASK) :
207                 NULL);
208 }
209
210 /*
211  * low 64 bits:
212  * 0: present
213  * 1: fault processing disable
214  * 2-3: translation type
215  * 12-63: address space root
216  * high 64 bits:
217  * 0-2: address width
218  * 3-6: avail
219  * 8-23: domain id
220  */
221 struct context_entry {
222         u64 lo;
223         u64 hi;
224 };
225
226 static inline bool context_present(struct context_entry *context)
227 {
228         return (context->lo & 1);
229 }
230 static inline void context_set_present(struct context_entry *context)
231 {
232         context->lo |= 1;
233 }
234
235 static inline void context_set_fault_enable(struct context_entry *context)
236 {
237         context->lo &= (((u64)-1) << 2) | 1;
238 }
239
240 static inline void context_set_translation_type(struct context_entry *context,
241                                                 unsigned long value)
242 {
243         context->lo &= (((u64)-1) << 4) | 3;
244         context->lo |= (value & 3) << 2;
245 }
246
247 static inline void context_set_address_root(struct context_entry *context,
248                                             unsigned long value)
249 {
250         context->lo |= value & VTD_PAGE_MASK;
251 }
252
253 static inline void context_set_address_width(struct context_entry *context,
254                                              unsigned long value)
255 {
256         context->hi |= value & 7;
257 }
258
259 static inline void context_set_domain_id(struct context_entry *context,
260                                          unsigned long value)
261 {
262         context->hi |= (value & ((1 << 16) - 1)) << 8;
263 }
264
265 static inline void context_clear_entry(struct context_entry *context)
266 {
267         context->lo = 0;
268         context->hi = 0;
269 }
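/*
 * Worked example (illustrative): programming a context entry for domain id 42
 * with a four-level (agaw 2) table at physical address pgd_phys would combine
 * the helpers above roughly as:
 *
 *      context_set_domain_id(ce, 42);          // hi |= 42 << 8
 *      context_set_address_width(ce, 2);       // hi |= 2 (bits 0-2)
 *      context_set_address_root(ce, pgd_phys); // lo |= pgd_phys & VTD_PAGE_MASK
 *      context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
 *      context_set_present(ce);                // lo |= 1
 *
 * The 'ce' and 'pgd_phys' names are assumed here purely for illustration;
 * the real mapping path performs these steps with its own values.
 */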
270
271 /*
272  * 0: readable
273  * 1: writable
274  * 2-6: reserved
275  * 7: super page
276  * 8-10: available
277  * 11: snoop behavior
278  * 12-63: Host physical address
279  */
280 struct dma_pte {
281         u64 val;
282 };
283
284 static inline void dma_clear_pte(struct dma_pte *pte)
285 {
286         pte->val = 0;
287 }
288
289 static inline u64 dma_pte_addr(struct dma_pte *pte)
290 {
291 #ifdef CONFIG_64BIT
292         return pte->val & VTD_PAGE_MASK;
293 #else
294         /* Must have a full atomic 64-bit read */
295         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
296 #endif
297 }
298
299 static inline bool dma_pte_present(struct dma_pte *pte)
300 {
301         return (pte->val & 3) != 0;
302 }
303
304 static inline bool dma_pte_superpage(struct dma_pte *pte)
305 {
306         return (pte->val & (1 << 7));
307 }
308
309 static inline int first_pte_in_page(struct dma_pte *pte)
310 {
311         return !((unsigned long)pte & ~VTD_PAGE_MASK);
312 }
313
314 /*
315  * This domain is a statically identity mapping domain.
316  *      1. This domain creates a static 1:1 mapping to all usable memory.
317  *      2. It maps to each iommu if successful.
318  *      3. Each iommu maps to this domain if successful.
319  */
320 static struct dmar_domain *si_domain;
321 static int hw_pass_through = 1;
322
323 /* devices under the same p2p bridge are owned in one domain */
324 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
325
326 /* domain represents a virtual machine, more than one devices
327  * across iommus may be owned in one domain, e.g. kvm guest.
328  */
329 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 1)
330
331 /* si_domain contains multiple devices */
332 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 2)
333
334 /* define the limit of IOMMUs supported in each domain */
335 #ifdef  CONFIG_X86
336 # define        IOMMU_UNITS_SUPPORTED   MAX_IO_APICS
337 #else
338 # define        IOMMU_UNITS_SUPPORTED   64
339 #endif
340
341 struct dmar_domain {
342         int     id;                     /* domain id */
343         int     nid;                    /* node id */
344         DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
345                                         /* bitmap of iommus this domain uses*/
346
347         struct list_head devices;       /* all devices' list */
348         struct iova_domain iovad;       /* iova's that belong to this domain */
349
350         struct dma_pte  *pgd;           /* virtual address */
351         int             gaw;            /* max guest address width */
352
353         /* adjusted guest address width, 0 is level 2 30-bit */
354         int             agaw;
355
356         int             flags;          /* flags to find out type of domain */
357
358         int             iommu_coherency;/* indicate coherency of iommu access */
359         int             iommu_snooping; /* indicate snooping control feature*/
360         int             iommu_count;    /* reference count of iommu */
361         int             iommu_superpage;/* Level of superpages supported:
362                                            0 == 4KiB (no superpages), 1 == 2MiB,
363                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
364         spinlock_t      iommu_lock;     /* protect iommu set in domain */
365         u64             max_addr;       /* maximum mapped address */
366 };
367
368 /* PCI domain-device relationship */
369 struct device_domain_info {
370         struct list_head link;  /* link to domain siblings */
371         struct list_head global; /* link to global list */
372         u8 bus;                 /* PCI bus number */
373         u8 devfn;               /* PCI devfn number */
374         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
375         struct intel_iommu *iommu; /* IOMMU used by this device */
376         struct dmar_domain *domain; /* pointer to domain */
377 };
378
379 struct dmar_rmrr_unit {
380         struct list_head list;          /* list of rmrr units   */
381         struct acpi_dmar_header *hdr;   /* ACPI header          */
382         u64     base_address;           /* reserved base address*/
383         u64     end_address;            /* reserved end address */
384         struct dmar_dev_scope *devices; /* target devices */
385         int     devices_cnt;            /* target device count */
386 };
387
388 struct dmar_atsr_unit {
389         struct list_head list;          /* list of ATSR units */
390         struct acpi_dmar_header *hdr;   /* ACPI header */
391         struct dmar_dev_scope *devices; /* target devices */
392         int devices_cnt;                /* target device count */
393         u8 include_all:1;               /* include all ports */
394 };
395
396 static LIST_HEAD(dmar_atsr_units);
397 static LIST_HEAD(dmar_rmrr_units);
398
399 #define for_each_rmrr_units(rmrr) \
400         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
401
402 static void flush_unmaps_timeout(unsigned long data);
403
404 static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
405
406 #define HIGH_WATER_MARK 250
407 struct deferred_flush_tables {
408         int next;
409         struct iova *iova[HIGH_WATER_MARK];
410         struct dmar_domain *domain[HIGH_WATER_MARK];
411         struct page *freelist[HIGH_WATER_MARK];
412 };
413
414 static struct deferred_flush_tables *deferred_flush;
415
416 /* number of IOMMUs; bounds per-domain iommu bitmap scans and g_iommus indexing */
417 static int g_num_of_iommus;
418
419 static DEFINE_SPINLOCK(async_umap_flush_lock);
420 static LIST_HEAD(unmaps_to_do);
421
422 static int timer_on;
423 static long list_size;
424
425 static void domain_exit(struct dmar_domain *domain);
426 static void domain_remove_dev_info(struct dmar_domain *domain);
427 static void domain_remove_one_dev_info(struct dmar_domain *domain,
428                                        struct pci_dev *pdev);
429 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
430                                            struct device *dev);
431
432 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
433 int dmar_disabled = 0;
434 #else
435 int dmar_disabled = 1;
436 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
437
438 int intel_iommu_enabled = 0;
439 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
440
441 static int dmar_map_gfx = 1;
442 static int dmar_forcedac;
443 static int intel_iommu_strict;
444 static int intel_iommu_superpage = 1;
445
446 int intel_iommu_gfx_mapped;
447 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
448
449 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
450 static DEFINE_SPINLOCK(device_domain_lock);
451 static LIST_HEAD(device_domain_list);
452
453 static struct iommu_ops intel_iommu_ops;
454
455 static int __init intel_iommu_setup(char *str)
456 {
457         if (!str)
458                 return -EINVAL;
459         while (*str) {
460                 if (!strncmp(str, "on", 2)) {
461                         dmar_disabled = 0;
462                         printk(KERN_INFO "Intel-IOMMU: enabled\n");
463                 } else if (!strncmp(str, "off", 3)) {
464                         dmar_disabled = 1;
465                         printk(KERN_INFO "Intel-IOMMU: disabled\n");
466                 } else if (!strncmp(str, "igfx_off", 8)) {
467                         dmar_map_gfx = 0;
468                         printk(KERN_INFO
469                                 "Intel-IOMMU: disable GFX device mapping\n");
470                 } else if (!strncmp(str, "forcedac", 8)) {
471                         printk(KERN_INFO
472                                 "Intel-IOMMU: Forcing DAC for PCI devices\n");
473                         dmar_forcedac = 1;
474                 } else if (!strncmp(str, "strict", 6)) {
475                         printk(KERN_INFO
476                                 "Intel-IOMMU: disable batched IOTLB flush\n");
477                         intel_iommu_strict = 1;
478                 } else if (!strncmp(str, "sp_off", 6)) {
479                         printk(KERN_INFO
480                                 "Intel-IOMMU: disable supported super page\n");
481                         intel_iommu_superpage = 0;
482                 }
483
484                 str += strcspn(str, ",");
485                 while (*str == ',')
486                         str++;
487         }
488         return 0;
489 }
490 __setup("intel_iommu=", intel_iommu_setup);
491
492 static struct kmem_cache *iommu_domain_cache;
493 static struct kmem_cache *iommu_devinfo_cache;
494 static struct kmem_cache *iommu_iova_cache;
495
496 static inline void *alloc_pgtable_page(int node)
497 {
498         struct page *page;
499         void *vaddr = NULL;
500
501         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
502         if (page)
503                 vaddr = page_address(page);
504         return vaddr;
505 }
506
507 static inline void free_pgtable_page(void *vaddr)
508 {
509         free_page((unsigned long)vaddr);
510 }
511
512 static inline void *alloc_domain_mem(void)
513 {
514         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
515 }
516
517 static void free_domain_mem(void *vaddr)
518 {
519         kmem_cache_free(iommu_domain_cache, vaddr);
520 }
521
522 static inline void * alloc_devinfo_mem(void)
523 {
524         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
525 }
526
527 static inline void free_devinfo_mem(void *vaddr)
528 {
529         kmem_cache_free(iommu_devinfo_cache, vaddr);
530 }
531
532 struct iova *alloc_iova_mem(void)
533 {
534         return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
535 }
536
537 void free_iova_mem(struct iova *iova)
538 {
539         kmem_cache_free(iommu_iova_cache, iova);
540 }
541
542
543 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
544 {
545         unsigned long sagaw;
546         int agaw = -1;
547
548         sagaw = cap_sagaw(iommu->cap);
549         for (agaw = width_to_agaw(max_gaw);
550              agaw >= 0; agaw--) {
551                 if (test_bit(agaw, &sagaw))
552                         break;
553         }
554
555         return agaw;
556 }
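/*
 * Worked example (illustrative): cap_sagaw() returns a bitmap of supported
 * adjusted widths.  If bits 1 and 2 are set (39- and 48-bit tables) and
 * max_gaw is 48, the loop starts at width_to_agaw(48) = 2 and returns 2.
 * If only bit 1 were set, it would step down and return 1 instead.
 */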
557
558 /*
559  * Calculate max SAGAW for each iommu.
560  */
561 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
562 {
563         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
564 }
565
566 /*
567  * calculate agaw for each iommu.
568  * "SAGAW" may be different across iommus, use a default agaw, and
569  * fall back to a smaller supported agaw for iommus that don't support the default agaw.
570  */
571 int iommu_calculate_agaw(struct intel_iommu *iommu)
572 {
573         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
574 }
575
576 /* This function only returns a single iommu in a domain */
577 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
578 {
579         int iommu_id;
580
581         /* si_domain and vm domain should not get here. */
582         BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
583         BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
584
585         iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
586         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
587                 return NULL;
588
589         return g_iommus[iommu_id];
590 }
591
592 static void domain_update_iommu_coherency(struct dmar_domain *domain)
593 {
594         struct dmar_drhd_unit *drhd;
595         struct intel_iommu *iommu;
596         int i, found = 0;
597
598         domain->iommu_coherency = 1;
599
600         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
601                 found = 1;
602                 if (!ecap_coherent(g_iommus[i]->ecap)) {
603                         domain->iommu_coherency = 0;
604                         break;
605                 }
606         }
607         if (found)
608                 return;
609
610         /* No hardware attached; use lowest common denominator */
611         rcu_read_lock();
612         for_each_active_iommu(iommu, drhd) {
613                 if (!ecap_coherent(iommu->ecap)) {
614                         domain->iommu_coherency = 0;
615                         break;
616                 }
617         }
618         rcu_read_unlock();
619 }
620
621 static void domain_update_iommu_snooping(struct dmar_domain *domain)
622 {
623         int i;
624
625         domain->iommu_snooping = 1;
626
627         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
628                 if (!ecap_sc_support(g_iommus[i]->ecap)) {
629                         domain->iommu_snooping = 0;
630                         break;
631                 }
632         }
633 }
634
635 static void domain_update_iommu_superpage(struct dmar_domain *domain)
636 {
637         struct dmar_drhd_unit *drhd;
638         struct intel_iommu *iommu = NULL;
639         int mask = 0xf;
640
641         if (!intel_iommu_superpage) {
642                 domain->iommu_superpage = 0;
643                 return;
644         }
645
646         /* set iommu_superpage to the smallest common denominator */
647         rcu_read_lock();
648         for_each_active_iommu(iommu, drhd) {
649                 mask &= cap_super_page_val(iommu->cap);
650                 if (!mask) {
651                         break;
652                 }
653         }
654         rcu_read_unlock();
655
656         domain->iommu_superpage = fls(mask);
657 }
658
659 /* Some capabilities may be different across iommus */
660 static void domain_update_iommu_cap(struct dmar_domain *domain)
661 {
662         domain_update_iommu_coherency(domain);
663         domain_update_iommu_snooping(domain);
664         domain_update_iommu_superpage(domain);
665 }
666
667 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
668 {
669         struct dmar_drhd_unit *drhd = NULL;
670         struct intel_iommu *iommu;
671         struct device *tmp;
672         struct pci_dev *ptmp, *pdev = NULL;
673         u16 segment;
674         int i;
675
676         if (dev_is_pci(dev)) {
677                 pdev = to_pci_dev(dev);
678                 segment = pci_domain_nr(pdev->bus);
679         } else if (ACPI_COMPANION(dev))
680                 dev = &ACPI_COMPANION(dev)->dev;
681
682         rcu_read_lock();
683         for_each_active_iommu(iommu, drhd) {
684                 if (pdev && segment != drhd->segment)
685                         continue;
686
687                 for_each_active_dev_scope(drhd->devices,
688                                           drhd->devices_cnt, i, tmp) {
689                         if (tmp == dev) {
690                                 *bus = drhd->devices[i].bus;
691                                 *devfn = drhd->devices[i].devfn;
692                                 goto out;
693                         }
694
695                         if (!pdev || !dev_is_pci(tmp))
696                                 continue;
697
698                         ptmp = to_pci_dev(tmp);
699                         if (ptmp->subordinate &&
700                             ptmp->subordinate->number <= pdev->bus->number &&
701                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
702                                 goto got_pdev;
703                 }
704
705                 if (pdev && drhd->include_all) {
706                 got_pdev:
707                         *bus = pdev->bus->number;
708                         *devfn = pdev->devfn;
709                         goto out;
710                 }
711         }
712         iommu = NULL;
713  out:
714         rcu_read_unlock();
715
716         return iommu;
717 }
718
719 static void domain_flush_cache(struct dmar_domain *domain,
720                                void *addr, int size)
721 {
722         if (!domain->iommu_coherency)
723                 clflush_cache_range(addr, size);
724 }
725
726 /* Gets context entry for a given bus and devfn */
727 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
728                 u8 bus, u8 devfn)
729 {
730         struct root_entry *root;
731         struct context_entry *context;
732         unsigned long phy_addr;
733         unsigned long flags;
734
735         spin_lock_irqsave(&iommu->lock, flags);
736         root = &iommu->root_entry[bus];
737         context = get_context_addr_from_root(root);
738         if (!context) {
739                 context = (struct context_entry *)
740                                 alloc_pgtable_page(iommu->node);
741                 if (!context) {
742                         spin_unlock_irqrestore(&iommu->lock, flags);
743                         return NULL;
744                 }
745                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
746                 phy_addr = virt_to_phys((void *)context);
747                 set_root_value(root, phy_addr);
748                 set_root_present(root);
749                 __iommu_flush_cache(iommu, root, sizeof(*root));
750         }
751         spin_unlock_irqrestore(&iommu->lock, flags);
752         return &context[devfn];
753 }
754
755 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
756 {
757         struct root_entry *root;
758         struct context_entry *context;
759         int ret;
760         unsigned long flags;
761
762         spin_lock_irqsave(&iommu->lock, flags);
763         root = &iommu->root_entry[bus];
764         context = get_context_addr_from_root(root);
765         if (!context) {
766                 ret = 0;
767                 goto out;
768         }
769         ret = context_present(&context[devfn]);
770 out:
771         spin_unlock_irqrestore(&iommu->lock, flags);
772         return ret;
773 }
774
775 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
776 {
777         struct root_entry *root;
778         struct context_entry *context;
779         unsigned long flags;
780
781         spin_lock_irqsave(&iommu->lock, flags);
782         root = &iommu->root_entry[bus];
783         context = get_context_addr_from_root(root);
784         if (context) {
785                 context_clear_entry(&context[devfn]);
786                 __iommu_flush_cache(iommu, &context[devfn], \
787                         sizeof(*context));
788         }
789         spin_unlock_irqrestore(&iommu->lock, flags);
790 }
791
792 static void free_context_table(struct intel_iommu *iommu)
793 {
794         struct root_entry *root;
795         int i;
796         unsigned long flags;
797         struct context_entry *context;
798
799         spin_lock_irqsave(&iommu->lock, flags);
800         if (!iommu->root_entry) {
801                 goto out;
802         }
803         for (i = 0; i < ROOT_ENTRY_NR; i++) {
804                 root = &iommu->root_entry[i];
805                 context = get_context_addr_from_root(root);
806                 if (context)
807                         free_pgtable_page(context);
808         }
809         free_pgtable_page(iommu->root_entry);
810         iommu->root_entry = NULL;
811 out:
812         spin_unlock_irqrestore(&iommu->lock, flags);
813 }
814
815 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
816                                       unsigned long pfn, int *target_level)
817 {
818         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
819         struct dma_pte *parent, *pte = NULL;
820         int level = agaw_to_level(domain->agaw);
821         int offset;
822
823         BUG_ON(!domain->pgd);
824
825         if (addr_width < BITS_PER_LONG && pfn >> addr_width)
826                 /* Address beyond IOMMU's addressing capabilities. */
827                 return NULL;
828
829         parent = domain->pgd;
830
831         while (1) {
832                 void *tmp_page;
833
834                 offset = pfn_level_offset(pfn, level);
835                 pte = &parent[offset];
836                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
837                         break;
838                 if (level == *target_level)
839                         break;
840
841                 if (!dma_pte_present(pte)) {
842                         uint64_t pteval;
843
844                         tmp_page = alloc_pgtable_page(domain->nid);
845
846                         if (!tmp_page)
847                                 return NULL;
848
849                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
850                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
851                         if (cmpxchg64(&pte->val, 0ULL, pteval)) {
852                                 /* Someone else set it while we were thinking; use theirs. */
853                                 free_pgtable_page(tmp_page);
854                         } else {
855                                 dma_pte_addr(pte);
856                                 domain_flush_cache(domain, pte, sizeof(*pte));
857                         }
858                 }
859                 if (level == 1)
860                         break;
861
862                 parent = phys_to_virt(dma_pte_addr(pte));
863                 level--;
864         }
865
866         if (!*target_level)
867                 *target_level = level;
868
869         return pte;
870 }
871
872
873 /* return address's pte at specific level */
874 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
875                                          unsigned long pfn,
876                                          int level, int *large_page)
877 {
878         struct dma_pte *parent, *pte = NULL;
879         int total = agaw_to_level(domain->agaw);
880         int offset;
881
882         parent = domain->pgd;
883         while (level <= total) {
884                 offset = pfn_level_offset(pfn, total);
885                 pte = &parent[offset];
886                 if (level == total)
887                         return pte;
888
889                 if (!dma_pte_present(pte)) {
890                         *large_page = total;
891                         break;
892                 }
893
894                 if (pte->val & DMA_PTE_LARGE_PAGE) {
895                         *large_page = total;
896                         return pte;
897                 }
898
899                 parent = phys_to_virt(dma_pte_addr(pte));
900                 total--;
901         }
902         return NULL;
903 }
904
905 /* clear last level pte; a tlb flush must follow */
906 static void dma_pte_clear_range(struct dmar_domain *domain,
907                                 unsigned long start_pfn,
908                                 unsigned long last_pfn)
909 {
910         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
911         unsigned int large_page = 1;
912         struct dma_pte *first_pte, *pte;
913
914         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
915         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
916         BUG_ON(start_pfn > last_pfn);
917
918         /* we don't need lock here; nobody else touches the iova range */
919         do {
920                 large_page = 1;
921                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
922                 if (!pte) {
923                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
924                         continue;
925                 }
926                 do {
927                         dma_clear_pte(pte);
928                         start_pfn += lvl_to_nr_pages(large_page);
929                         pte++;
930                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
931
932                 domain_flush_cache(domain, first_pte,
933                                    (void *)pte - (void *)first_pte);
934
935         } while (start_pfn && start_pfn <= last_pfn);
936 }
937
938 static void dma_pte_free_level(struct dmar_domain *domain, int level,
939                                struct dma_pte *pte, unsigned long pfn,
940                                unsigned long start_pfn, unsigned long last_pfn)
941 {
942         pfn = max(start_pfn, pfn);
943         pte = &pte[pfn_level_offset(pfn, level)];
944
945         do {
946                 unsigned long level_pfn;
947                 struct dma_pte *level_pte;
948
949                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
950                         goto next;
951
952                 level_pfn = pfn & level_mask(level - 1);
953                 level_pte = phys_to_virt(dma_pte_addr(pte));
954
955                 if (level > 2)
956                         dma_pte_free_level(domain, level - 1, level_pte,
957                                            level_pfn, start_pfn, last_pfn);
958
959                 /* If range covers entire pagetable, free it */
960                 if (!(start_pfn > level_pfn ||
961                       last_pfn < level_pfn + level_size(level) - 1)) {
962                         dma_clear_pte(pte);
963                         domain_flush_cache(domain, pte, sizeof(*pte));
964                         free_pgtable_page(level_pte);
965                 }
966 next:
967                 pfn += level_size(level);
968         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
969 }
970
971 /* free page table pages. last level pte should already be cleared */
972 static void dma_pte_free_pagetable(struct dmar_domain *domain,
973                                    unsigned long start_pfn,
974                                    unsigned long last_pfn)
975 {
976         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
977
978         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
979         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
980         BUG_ON(start_pfn > last_pfn);
981
982         /* We don't need lock here; nobody else touches the iova range */
983         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
984                            domain->pgd, 0, start_pfn, last_pfn);
985
986         /* free pgd */
987         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
988                 free_pgtable_page(domain->pgd);
989                 domain->pgd = NULL;
990         }
991 }
992
993 /* When a page at a given level is being unlinked from its parent, we don't
994    need to *modify* it at all. All we need to do is make a list of all the
995    pages which can be freed just as soon as we've flushed the IOTLB and we
996    know the hardware page-walk will no longer touch them.
997    The 'pte' argument is the *parent* PTE, pointing to the page that is to
998    be freed. */
999 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1000                                             int level, struct dma_pte *pte,
1001                                             struct page *freelist)
1002 {
1003         struct page *pg;
1004
1005         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1006         pg->freelist = freelist;
1007         freelist = pg;
1008
1009         if (level == 1)
1010                 return freelist;
1011
1012         for (pte = page_address(pg); !first_pte_in_page(pte); pte++) {
1013                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1014                         freelist = dma_pte_list_pagetables(domain, level - 1,
1015                                                            pte, freelist);
1016         }
1017
1018         return freelist;
1019 }
1020
1021 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1022                                         struct dma_pte *pte, unsigned long pfn,
1023                                         unsigned long start_pfn,
1024                                         unsigned long last_pfn,
1025                                         struct page *freelist)
1026 {
1027         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1028
1029         pfn = max(start_pfn, pfn);
1030         pte = &pte[pfn_level_offset(pfn, level)];
1031
1032         do {
1033                 unsigned long level_pfn;
1034
1035                 if (!dma_pte_present(pte))
1036                         goto next;
1037
1038                 level_pfn = pfn & level_mask(level);
1039
1040                 /* If range covers entire pagetable, free it */
1041                 if (start_pfn <= level_pfn &&
1042                     last_pfn >= level_pfn + level_size(level) - 1) {
1043                         /* These subordinate page tables are going away entirely. Don't
1044                            bother to clear them; we're just going to *free* them. */
1045                         if (level > 1 && !dma_pte_superpage(pte))
1046                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1047
1048                         dma_clear_pte(pte);
1049                         if (!first_pte)
1050                                 first_pte = pte;
1051                         last_pte = pte;
1052                 } else if (level > 1) {
1053                         /* Recurse down into a level that isn't *entirely* obsolete */
1054                         freelist = dma_pte_clear_level(domain, level - 1,
1055                                                        phys_to_virt(dma_pte_addr(pte)),
1056                                                        level_pfn, start_pfn, last_pfn,
1057                                                        freelist);
1058                 }
1059 next:
1060                 pfn += level_size(level);
1061         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1062
1063         if (first_pte)
1064                 domain_flush_cache(domain, first_pte,
1065                                    (void *)++last_pte - (void *)first_pte);
1066
1067         return freelist;
1068 }
1069
1070 /* We can't just free the pages because the IOMMU may still be walking
1071    the page tables, and may have cached the intermediate levels. The
1072    pages can only be freed after the IOTLB flush has been done. */
1073 struct page *domain_unmap(struct dmar_domain *domain,
1074                           unsigned long start_pfn,
1075                           unsigned long last_pfn)
1076 {
1077         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1078         struct page *freelist = NULL;
1079
1080         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
1081         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
1082         BUG_ON(start_pfn > last_pfn);
1083
1084         /* we don't need lock here; nobody else touches the iova range */
1085         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1086                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1087
1088         /* free pgd */
1089         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1090                 struct page *pgd_page = virt_to_page(domain->pgd);
1091                 pgd_page->freelist = freelist;
1092                 freelist = pgd_page;
1093
1094                 domain->pgd = NULL;
1095         }
1096
1097         return freelist;
1098 }
1099
1100 void dma_free_pagelist(struct page *freelist)
1101 {
1102         struct page *pg;
1103
1104         while ((pg = freelist)) {
1105                 freelist = pg->freelist;
1106                 free_pgtable_page(page_address(pg));
1107         }
1108 }
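/*
 * Illustrative ordering note: callers are expected to first use domain_unmap()
 * to detach the page-table pages and collect them on a freelist, then flush
 * the IOTLB so the hardware page walker can no longer reach them, and only
 * then hand the list to dma_free_pagelist() to return the pages.
 */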
1109
1110 /* iommu handling */
1111 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1112 {
1113         struct root_entry *root;
1114         unsigned long flags;
1115
1116         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1117         if (!root)
1118                 return -ENOMEM;
1119
1120         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1121
1122         spin_lock_irqsave(&iommu->lock, flags);
1123         iommu->root_entry = root;
1124         spin_unlock_irqrestore(&iommu->lock, flags);
1125
1126         return 0;
1127 }
1128
1129 static void iommu_set_root_entry(struct intel_iommu *iommu)
1130 {
1131         void *addr;
1132         u32 sts;
1133         unsigned long flag;
1134
1135         addr = iommu->root_entry;
1136
1137         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1138         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
1139
1140         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1141
1142         /* Make sure hardware complete it */
1143         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1144                       readl, (sts & DMA_GSTS_RTPS), sts);
1145
1146         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1147 }
1148
1149 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1150 {
1151         u32 val;
1152         unsigned long flag;
1153
1154         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1155                 return;
1156
1157         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1158         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1159
1160         /* Make sure hardware complete it */
1161         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1162                       readl, (!(val & DMA_GSTS_WBFS)), val);
1163
1164         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1165 }
1166
1167 /* return value determines if we need a write buffer flush */
1168 static void __iommu_flush_context(struct intel_iommu *iommu,
1169                                   u16 did, u16 source_id, u8 function_mask,
1170                                   u64 type)
1171 {
1172         u64 val = 0;
1173         unsigned long flag;
1174
1175         switch (type) {
1176         case DMA_CCMD_GLOBAL_INVL:
1177                 val = DMA_CCMD_GLOBAL_INVL;
1178                 break;
1179         case DMA_CCMD_DOMAIN_INVL:
1180                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1181                 break;
1182         case DMA_CCMD_DEVICE_INVL:
1183                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1184                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1185                 break;
1186         default:
1187                 BUG();
1188         }
1189         val |= DMA_CCMD_ICC;
1190
1191         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1192         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1193
1194         /* Make sure hardware complete it */
1195         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1196                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1197
1198         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1199 }
1200
1201 /* return value determines if we need a write buffer flush */
1202 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1203                                 u64 addr, unsigned int size_order, u64 type)
1204 {
1205         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1206         u64 val = 0, val_iva = 0;
1207         unsigned long flag;
1208
1209         switch (type) {
1210         case DMA_TLB_GLOBAL_FLUSH:
1211                 /* global flush doesn't need to set IVA_REG */
1212                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1213                 break;
1214         case DMA_TLB_DSI_FLUSH:
1215                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1216                 break;
1217         case DMA_TLB_PSI_FLUSH:
1218                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1219                 /* IH bit is passed in as part of address */
1220                 val_iva = size_order | addr;
1221                 break;
1222         default:
1223                 BUG();
1224         }
1225         /* Note: set drain read/write */
1226 #if 0
1227         /*
1228          * This is probably only needed to be extra safe. Looks like we can
1229          * ignore it without any impact.
1230          */
1231         if (cap_read_drain(iommu->cap))
1232                 val |= DMA_TLB_READ_DRAIN;
1233 #endif
1234         if (cap_write_drain(iommu->cap))
1235                 val |= DMA_TLB_WRITE_DRAIN;
1236
1237         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1238         /* Note: Only uses first TLB reg currently */
1239         if (val_iva)
1240                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1241         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1242
1243         /* Make sure hardware complete it */
1244         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1245                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1246
1247         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1248
1249         /* check IOTLB invalidation granularity */
1250         if (DMA_TLB_IAIG(val) == 0)
1251                 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1252         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1253                 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1254                         (unsigned long long)DMA_TLB_IIRG(type),
1255                         (unsigned long long)DMA_TLB_IAIG(val));
1256 }
1257
1258 static struct device_domain_info *
1259 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1260                          u8 bus, u8 devfn)
1261 {
1262         int found = 0;
1263         unsigned long flags;
1264         struct device_domain_info *info;
1265         struct pci_dev *pdev;
1266
1267         if (!ecap_dev_iotlb_support(iommu->ecap))
1268                 return NULL;
1269
1270         if (!iommu->qi)
1271                 return NULL;
1272
1273         spin_lock_irqsave(&device_domain_lock, flags);
1274         list_for_each_entry(info, &domain->devices, link)
1275                 if (info->bus == bus && info->devfn == devfn) {
1276                         found = 1;
1277                         break;
1278                 }
1279         spin_unlock_irqrestore(&device_domain_lock, flags);
1280
1281         if (!found || !info->dev || !dev_is_pci(info->dev))
1282                 return NULL;
1283
1284         pdev = to_pci_dev(info->dev);
1285
1286         if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1287                 return NULL;
1288
1289         if (!dmar_find_matched_atsr_unit(pdev))
1290                 return NULL;
1291
1292         return info;
1293 }
1294
1295 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1296 {
1297         if (!info || !dev_is_pci(info->dev))
1298                 return;
1299
1300         pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1301 }
1302
1303 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1304 {
1305         if (!info->dev || !dev_is_pci(info->dev) ||
1306             !pci_ats_enabled(to_pci_dev(info->dev)))
1307                 return;
1308
1309         pci_disable_ats(to_pci_dev(info->dev));
1310 }
1311
1312 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1313                                   u64 addr, unsigned mask)
1314 {
1315         u16 sid, qdep;
1316         unsigned long flags;
1317         struct device_domain_info *info;
1318
1319         spin_lock_irqsave(&device_domain_lock, flags);
1320         list_for_each_entry(info, &domain->devices, link) {
1321                 struct pci_dev *pdev;
1322                 if (!info->dev || !dev_is_pci(info->dev))
1323                         continue;
1324
1325                 pdev = to_pci_dev(info->dev);
1326                 if (!pci_ats_enabled(pdev))
1327                         continue;
1328
1329                 sid = info->bus << 8 | info->devfn;
1330                 qdep = pci_ats_queue_depth(pdev);
1331                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1332         }
1333         spin_unlock_irqrestore(&device_domain_lock, flags);
1334 }
1335
1336 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1337                                   unsigned long pfn, unsigned int pages, int ih, int map)
1338 {
1339         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1340         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1341
1342         BUG_ON(pages == 0);
1343
1344         if (ih)
1345                 ih = 1 << 6;
1346         /*
1347          * Fall back to domain-selective flush if there is no PSI support or the size is
1348          * too big.
1349          * PSI requires page size to be 2 ^ x, and the base address is naturally
1350          * aligned to the size
1351          */
1352         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1353                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1354                                                 DMA_TLB_DSI_FLUSH);
1355         else
1356                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1357                                                 DMA_TLB_PSI_FLUSH);
1358
1359         /*
1360          * In caching mode, changes of pages from non-present to present require
1361          * flush. However, device IOTLB doesn't need to be flushed in this case.
1362          */
1363         if (!cap_caching_mode(iommu->cap) || !map)
1364                 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1365 }
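/*
 * Worked example (illustrative): flushing 9 pages gives
 * mask = ilog2(__roundup_pow_of_two(9)) = ilog2(16) = 4, i.e. a 16-page
 * (64KiB) naturally aligned region is invalidated.  If 4 exceeded
 * cap_max_amask_val() the code would fall back to the domain-selective
 * flush instead.
 */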
1366
1367 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1368 {
1369         u32 pmen;
1370         unsigned long flags;
1371
1372         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1373         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1374         pmen &= ~DMA_PMEN_EPM;
1375         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1376
1377         /* wait for the protected region status bit to clear */
1378         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1379                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1380
1381         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1382 }
1383
1384 static int iommu_enable_translation(struct intel_iommu *iommu)
1385 {
1386         u32 sts;
1387         unsigned long flags;
1388
1389         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1390         iommu->gcmd |= DMA_GCMD_TE;
1391         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1392
1393         /* Make sure hardware complete it */
1394         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1395                       readl, (sts & DMA_GSTS_TES), sts);
1396
1397         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1398         return 0;
1399 }
1400
1401 static int iommu_disable_translation(struct intel_iommu *iommu)
1402 {
1403         u32 sts;
1404         unsigned long flag;
1405
1406         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1407         iommu->gcmd &= ~DMA_GCMD_TE;
1408         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1409
1410         /* Make sure hardware complete it */
1411         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1412                       readl, (!(sts & DMA_GSTS_TES)), sts);
1413
1414         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1415         return 0;
1416 }
1417
1418
1419 static int iommu_init_domains(struct intel_iommu *iommu)
1420 {
1421         unsigned long ndomains;
1422         unsigned long nlongs;
1423
1424         ndomains = cap_ndoms(iommu->cap);
1425         pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1426                  iommu->seq_id, ndomains);
1427         nlongs = BITS_TO_LONGS(ndomains);
1428
1429         spin_lock_init(&iommu->lock);
1430
1431         /* TBD: there might be 64K domains,
1432          * consider other allocation schemes for future chips
1433          */
1434         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1435         if (!iommu->domain_ids) {
1436                 pr_err("IOMMU%d: allocating domain id array failed\n",
1437                        iommu->seq_id);
1438                 return -ENOMEM;
1439         }
1440         iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1441                         GFP_KERNEL);
1442         if (!iommu->domains) {
1443                 pr_err("IOMMU%d: allocating domain array failed\n",
1444                        iommu->seq_id);
1445                 kfree(iommu->domain_ids);
1446                 iommu->domain_ids = NULL;
1447                 return -ENOMEM;
1448         }
1449
1450         /*
1451          * if Caching mode is set, then invalid translations are tagged
1452          * with domain id 0. Hence we need to pre-allocate it.
1453          */
1454         if (cap_caching_mode(iommu->cap))
1455                 set_bit(0, iommu->domain_ids);
1456         return 0;
1457 }
1458
1459 static void free_dmar_iommu(struct intel_iommu *iommu)
1460 {
1461         struct dmar_domain *domain;
1462         int i, count;
1463         unsigned long flags;
1464
1465         if ((iommu->domains) && (iommu->domain_ids)) {
1466                 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1467                         /*
1468                          * Domain id 0 is reserved for invalid translation
1469                          * if hardware supports caching mode.
1470                          */
1471                         if (cap_caching_mode(iommu->cap) && i == 0)
1472                                 continue;
1473
1474                         domain = iommu->domains[i];
1475                         clear_bit(i, iommu->domain_ids);
1476
1477                         spin_lock_irqsave(&domain->iommu_lock, flags);
1478                         count = --domain->iommu_count;
1479                         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1480                         if (count == 0)
1481                                 domain_exit(domain);
1482                 }
1483         }
1484
1485         if (iommu->gcmd & DMA_GCMD_TE)
1486                 iommu_disable_translation(iommu);
1487
1488         kfree(iommu->domains);
1489         kfree(iommu->domain_ids);
1490         iommu->domains = NULL;
1491         iommu->domain_ids = NULL;
1492
1493         g_iommus[iommu->seq_id] = NULL;
1494
1495         /* free context mapping */
1496         free_context_table(iommu);
1497 }
1498
1499 static struct dmar_domain *alloc_domain(bool vm)
1500 {
1501         /* domain id for virtual machines; it won't be set in context entries */
1502         static atomic_t vm_domid = ATOMIC_INIT(0);
1503         struct dmar_domain *domain;
1504
1505         domain = alloc_domain_mem();
1506         if (!domain)
1507                 return NULL;
1508
1509         domain->nid = -1;
1510         domain->iommu_count = 0;
1511         memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
1512         domain->flags = 0;
1513         spin_lock_init(&domain->iommu_lock);
1514         INIT_LIST_HEAD(&domain->devices);
1515         if (vm) {
1516                 domain->id = atomic_inc_return(&vm_domid);
1517                 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
1518         }
1519
1520         return domain;
1521 }
1522
1523 static int iommu_attach_domain(struct dmar_domain *domain,
1524                                struct intel_iommu *iommu)
1525 {
1526         int num;
1527         unsigned long ndomains;
1528         unsigned long flags;
1529
1530         ndomains = cap_ndoms(iommu->cap);
1531
1532         spin_lock_irqsave(&iommu->lock, flags);
1533
1534         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1535         if (num >= ndomains) {
1536                 spin_unlock_irqrestore(&iommu->lock, flags);
1537                 printk(KERN_ERR "IOMMU: no free domain ids\n");
1538                 return -ENOMEM;
1539         }
1540
1541         domain->id = num;
1542         domain->iommu_count++;
1543         set_bit(num, iommu->domain_ids);
1544         set_bit(iommu->seq_id, domain->iommu_bmp);
1545         iommu->domains[num] = domain;
1546         spin_unlock_irqrestore(&iommu->lock, flags);
1547
1548         return 0;
1549 }
1550
1551 static void iommu_detach_domain(struct dmar_domain *domain,
1552                                 struct intel_iommu *iommu)
1553 {
1554         unsigned long flags;
1555         int num, ndomains;
1556
1557         spin_lock_irqsave(&iommu->lock, flags);
1558         ndomains = cap_ndoms(iommu->cap);
1559         for_each_set_bit(num, iommu->domain_ids, ndomains) {
1560                 if (iommu->domains[num] == domain) {
1561                         clear_bit(num, iommu->domain_ids);
1562                         iommu->domains[num] = NULL;
1563                         break;
1564                 }
1565         }
1566         spin_unlock_irqrestore(&iommu->lock, flags);
1567 }
1568
1569 static struct iova_domain reserved_iova_list;
1570 static struct lock_class_key reserved_rbtree_key;
1571
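/*
 * Build the global list of IOVA ranges that must never be handed out to
 * drivers: the IOAPIC MMIO window and every PCI device's MMIO resources,
 * so that DMA is never routed as a peer-to-peer access.  New domains
 * copy this list via domain_reserve_special_ranges().
 */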
1572 static int dmar_init_reserved_ranges(void)
1573 {
1574         struct pci_dev *pdev = NULL;
1575         struct iova *iova;
1576         int i;
1577
1578         init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1579
1580         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1581                 &reserved_rbtree_key);
1582
1583         /* IOAPIC ranges shouldn't be accessed by DMA */
1584         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1585                 IOVA_PFN(IOAPIC_RANGE_END));
1586         if (!iova) {
1587                 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1588                 return -ENODEV;
1589         }
1590
1591         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1592         for_each_pci_dev(pdev) {
1593                 struct resource *r;
1594
1595                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1596                         r = &pdev->resource[i];
1597                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1598                                 continue;
1599                         iova = reserve_iova(&reserved_iova_list,
1600                                             IOVA_PFN(r->start),
1601                                             IOVA_PFN(r->end));
1602                         if (!iova) {
1603                                 printk(KERN_ERR "Reserve iova failed\n");
1604                                 return -ENODEV;
1605                         }
1606                 }
1607         }
1608         return 0;
1609 }
1610
1611 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1612 {
1613         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1614 }
1615
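/*
 * Round a guest address width up to the nearest width a page-table walk
 * can express (12 + 9*n bits), capped at 64.  For example, a gaw of 36
 * becomes 39 and a gaw of 40 becomes 48; a gaw of 48 is already exact.
 */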
1616 static inline int guestwidth_to_adjustwidth(int gaw)
1617 {
1618         int agaw;
1619         int r = (gaw - 12) % 9;
1620
1621         if (r == 0)
1622                 agaw = gaw;
1623         else
1624                 agaw = gaw + 9 - r;
1625         if (agaw > 64)
1626                 agaw = 64;
1627         return agaw;
1628 }
1629
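/*
 * Initialize a newly attached domain: set up its IOVA allocator and
 * reserved ranges, derive gaw/agaw from the IOMMU's capabilities, cache
 * the coherency/snooping/superpage features, and allocate the top-level
 * page directory.
 */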
1630 static int domain_init(struct dmar_domain *domain, int guest_width)
1631 {
1632         struct intel_iommu *iommu;
1633         int adjust_width, agaw;
1634         unsigned long sagaw;
1635
1636         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1637         domain_reserve_special_ranges(domain);
1638
1639         /* calculate AGAW */
1640         iommu = domain_get_iommu(domain);
1641         if (guest_width > cap_mgaw(iommu->cap))
1642                 guest_width = cap_mgaw(iommu->cap);
1643         domain->gaw = guest_width;
1644         adjust_width = guestwidth_to_adjustwidth(guest_width);
1645         agaw = width_to_agaw(adjust_width);
1646         sagaw = cap_sagaw(iommu->cap);
1647         if (!test_bit(agaw, &sagaw)) {
1648                 /* hardware doesn't support it, choose a bigger one */
1649                 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1650                 agaw = find_next_bit(&sagaw, 5, agaw);
1651                 if (agaw >= 5)
1652                         return -ENODEV;
1653         }
1654         domain->agaw = agaw;
1655
1656         if (ecap_coherent(iommu->ecap))
1657                 domain->iommu_coherency = 1;
1658         else
1659                 domain->iommu_coherency = 0;
1660
1661         if (ecap_sc_support(iommu->ecap))
1662                 domain->iommu_snooping = 1;
1663         else
1664                 domain->iommu_snooping = 0;
1665
1666         if (intel_iommu_superpage)
1667                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1668         else
1669                 domain->iommu_superpage = 0;
1670
1671         domain->nid = iommu->node;
1672
1673         /* always allocate the top pgd */
1674         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1675         if (!domain->pgd)
1676                 return -ENOMEM;
1677         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1678         return 0;
1679 }
1680
1681 static void domain_exit(struct dmar_domain *domain)
1682 {
1683         struct dmar_drhd_unit *drhd;
1684         struct intel_iommu *iommu;
1685         struct page *freelist = NULL;
1686
1687         /* Domain 0 is reserved, so don't process it */
1688         if (!domain)
1689                 return;
1690
1691         /* Flush any lazy unmaps that may reference this domain */
1692         if (!intel_iommu_strict)
1693                 flush_unmaps_timeout(0);
1694
1695         /* remove associated devices */
1696         domain_remove_dev_info(domain);
1697
1698         /* destroy iovas */
1699         put_iova_domain(&domain->iovad);
1700
1701         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1702
1703         /* clear attached or cached domains */
1704         rcu_read_lock();
1705         for_each_active_iommu(iommu, drhd)
1706                 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1707                     test_bit(iommu->seq_id, domain->iommu_bmp))
1708                         iommu_detach_domain(domain, iommu);
1709         rcu_read_unlock();
1710
1711         dma_free_pagelist(freelist);
1712
1713         free_domain_mem(domain);
1714 }
1715
1716 static int domain_context_mapping_one(struct dmar_domain *domain,
1717                                       struct intel_iommu *iommu,
1718                                       u8 bus, u8 devfn, int translation)
1719 {
1720         struct context_entry *context;
1721         unsigned long flags;
1722         struct dma_pte *pgd;
1723         unsigned long num;
1724         unsigned long ndomains;
1725         int id;
1726         int agaw;
1727         struct device_domain_info *info = NULL;
1728
1729         pr_debug("Set context mapping for %02x:%02x.%d\n",
1730                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1731
1732         BUG_ON(!domain->pgd);
1733         BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1734                translation != CONTEXT_TT_MULTI_LEVEL);
1735
1736         context = device_to_context_entry(iommu, bus, devfn);
1737         if (!context)
1738                 return -ENOMEM;
1739         spin_lock_irqsave(&iommu->lock, flags);
1740         if (context_present(context)) {
1741                 spin_unlock_irqrestore(&iommu->lock, flags);
1742                 return 0;
1743         }
1744
1745         id = domain->id;
1746         pgd = domain->pgd;
1747
1748         if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1749             domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1750                 int found = 0;
1751
1752                 /* find an available domain id for this device in iommu */
1753                 ndomains = cap_ndoms(iommu->cap);
1754                 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1755                         if (iommu->domains[num] == domain) {
1756                                 id = num;
1757                                 found = 1;
1758                                 break;
1759                         }
1760                 }
1761
1762                 if (found == 0) {
1763                         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1764                         if (num >= ndomains) {
1765                                 spin_unlock_irqrestore(&iommu->lock, flags);
1766                                 printk(KERN_ERR "IOMMU: no free domain ids\n");
1767                                 return -EFAULT;
1768                         }
1769
1770                         set_bit(num, iommu->domain_ids);
1771                         iommu->domains[num] = domain;
1772                         id = num;
1773                 }
1774
1775                 /* Skip top levels of page tables for
1776                  * iommus which have less agaw than the default.
1777                  * Unnecessary for PT mode.
1778                  */
1779                 if (translation != CONTEXT_TT_PASS_THROUGH) {
1780                         for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1781                                 pgd = phys_to_virt(dma_pte_addr(pgd));
1782                                 if (!dma_pte_present(pgd)) {
1783                                         spin_unlock_irqrestore(&iommu->lock, flags);
1784                                         return -ENOMEM;
1785                                 }
1786                         }
1787                 }
1788         }
1789
1790         context_set_domain_id(context, id);
1791
1792         if (translation != CONTEXT_TT_PASS_THROUGH) {
1793                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1794                 translation = info ? CONTEXT_TT_DEV_IOTLB :
1795                                      CONTEXT_TT_MULTI_LEVEL;
1796         }
1797         /*
1798          * In pass through mode, AW must be programmed to indicate the largest
1799          * AGAW value supported by hardware. And ASR is ignored by hardware.
1800          */
1801         if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1802                 context_set_address_width(context, iommu->msagaw);
1803         else {
1804                 context_set_address_root(context, virt_to_phys(pgd));
1805                 context_set_address_width(context, iommu->agaw);
1806         }
1807
1808         context_set_translation_type(context, translation);
1809         context_set_fault_enable(context);
1810         context_set_present(context);
1811         domain_flush_cache(domain, context, sizeof(*context));
1812
1813         /*
1814          * It's a non-present to present mapping. If hardware doesn't cache
1815          * non-present entries we only need to flush the write-buffer. If it
1816          * _does_ cache non-present entries, then it does so in the special
1817          * domain #0, which we have to flush:
1818          */
1819         if (cap_caching_mode(iommu->cap)) {
1820                 iommu->flush.flush_context(iommu, 0,
1821                                            (((u16)bus) << 8) | devfn,
1822                                            DMA_CCMD_MASK_NOBIT,
1823                                            DMA_CCMD_DEVICE_INVL);
1824                 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1825         } else {
1826                 iommu_flush_write_buffer(iommu);
1827         }
1828         iommu_enable_dev_iotlb(info);
1829         spin_unlock_irqrestore(&iommu->lock, flags);
1830
1831         spin_lock_irqsave(&domain->iommu_lock, flags);
1832         if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1833                 domain->iommu_count++;
1834                 if (domain->iommu_count == 1)
1835                         domain->nid = iommu->node;
1836                 domain_update_iommu_cap(domain);
1837         }
1838         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1839         return 0;
1840 }
1841
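/*
 * Set up context entries for @dev.  For a device behind a PCIe-to-PCI
 * bridge, entries are also programmed for every bridge on the path up to
 * that bridge, and then for the bridge's secondary bus (devfn 0) if it is
 * PCIe, or for the bridge itself if it is legacy PCI, since requests from
 * all devices behind it carry the same source-id.
 */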
1842 static int
1843 domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1844                        int translation)
1845 {
1846         int ret;
1847         struct pci_dev *pdev, *tmp, *parent;
1848         struct intel_iommu *iommu;
1849         u8 bus, devfn;
1850
1851         iommu = device_to_iommu(dev, &bus, &devfn);
1852         if (!iommu)
1853                 return -ENODEV;
1854
1855         ret = domain_context_mapping_one(domain, iommu, bus, devfn,
1856                                          translation);
1857         if (ret || !dev_is_pci(dev))
1858                 return ret;
1859
1860         /* dependent device mapping */
1861         pdev = to_pci_dev(dev);
1862         tmp = pci_find_upstream_pcie_bridge(pdev);
1863         if (!tmp)
1864                 return 0;
1865         /* Secondary interface's bus number and devfn 0 */
1866         parent = pdev->bus->self;
1867         while (parent != tmp) {
1868                 ret = domain_context_mapping_one(domain, iommu,
1869                                                  parent->bus->number,
1870                                                  parent->devfn, translation);
1871                 if (ret)
1872                         return ret;
1873                 parent = parent->bus->self;
1874         }
1875         if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1876                 return domain_context_mapping_one(domain, iommu,
1877                                         tmp->subordinate->number, 0,
1878                                         translation);
1879         else /* this is a legacy PCI bridge */
1880                 return domain_context_mapping_one(domain, iommu,
1881                                                   tmp->bus->number,
1882                                                   tmp->devfn,
1883                                                   translation);
1884 }
1885
1886 static int domain_context_mapped(struct device *dev)
1887 {
1888         int ret;
1889         struct pci_dev *pdev, *tmp, *parent;
1890         struct intel_iommu *iommu;
1891         u8 bus, devfn;
1892
1893         iommu = device_to_iommu(dev, &bus, &devfn);
1894         if (!iommu)
1895                 return -ENODEV;
1896
1897         ret = device_context_mapped(iommu, bus, devfn);
1898         if (!ret || !dev_is_pci(dev))
1899                 return ret;
1900
1901         /* dependent device mapping */
1902         pdev = to_pci_dev(dev);
1903         tmp = pci_find_upstream_pcie_bridge(pdev);
1904         if (!tmp)
1905                 return ret;
1906         /* Secondary interface's bus number and devfn 0 */
1907         parent = pdev->bus->self;
1908         while (parent != tmp) {
1909                 ret = device_context_mapped(iommu, parent->bus->number,
1910                                             parent->devfn);
1911                 if (!ret)
1912                         return ret;
1913                 parent = parent->bus->self;
1914         }
1915         if (pci_is_pcie(tmp))
1916                 return device_context_mapped(iommu, tmp->subordinate->number,
1917                                              0);
1918         else
1919                 return device_context_mapped(iommu, tmp->bus->number,
1920                                              tmp->devfn);
1921 }
1922
1923 /* Returns a number of VTD pages, but aligned to MM page size */
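/*
 * For example, with 4KiB MM pages a buffer starting at page offset 0x234
 * with size 0x2000 ends at offset 0x2233 and therefore needs three 4KiB
 * VT-d pages.
 */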
1924 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1925                                             size_t size)
1926 {
1927         host_addr &= ~PAGE_MASK;
1928         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1929 }
1930
1931 /* Return largest possible superpage level for a given mapping */
1932 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1933                                           unsigned long iov_pfn,
1934                                           unsigned long phy_pfn,
1935                                           unsigned long pages)
1936 {
1937         int support, level = 1;
1938         unsigned long pfnmerge;
1939
1940         support = domain->iommu_superpage;
1941
1942         /* To use a large page, the virtual *and* physical addresses
1943            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1944            of them will mean we have to use smaller pages. So just
1945            merge them and check both at once. */
1946         pfnmerge = iov_pfn | phy_pfn;
1947
1948         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1949                 pages >>= VTD_STRIDE_SHIFT;
1950                 if (!pages)
1951                         break;
1952                 pfnmerge >>= VTD_STRIDE_SHIFT;
1953                 level++;
1954                 support--;
1955         }
1956         return level;
1957 }
1958
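/*
 * Core mapping loop: install PTEs for @nr_pages VT-d pages starting at
 * @iov_pfn, taking the physical pages either from @sg or from the
 * contiguous range starting at @phys_pfn.  Each iteration picks the
 * largest usable superpage level, sets the PTE with a local cmpxchg
 * (warning if it was already populated) and flushes the CPU cache
 * whenever a page-table page has been filled or the mapping ends.
 */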
1959 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1960                             struct scatterlist *sg, unsigned long phys_pfn,
1961                             unsigned long nr_pages, int prot)
1962 {
1963         struct dma_pte *first_pte = NULL, *pte = NULL;
1964         phys_addr_t uninitialized_var(pteval);
1965         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1966         unsigned long sg_res;
1967         unsigned int largepage_lvl = 0;
1968         unsigned long lvl_pages = 0;
1969
1970         BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1971
1972         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1973                 return -EINVAL;
1974
1975         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1976
1977         if (sg)
1978                 sg_res = 0;
1979         else {
1980                 sg_res = nr_pages + 1;
1981                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1982         }
1983
1984         while (nr_pages > 0) {
1985                 uint64_t tmp;
1986
1987                 if (!sg_res) {
1988                         sg_res = aligned_nrpages(sg->offset, sg->length);
1989                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1990                         sg->dma_length = sg->length;
1991                         pteval = page_to_phys(sg_page(sg)) | prot;
1992                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
1993                 }
1994
1995                 if (!pte) {
1996                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1997
1998                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
1999                         if (!pte)
2000                                 return -ENOMEM;
2001                         /* It is a large page */
2002                         if (largepage_lvl > 1) {
2003                                 pteval |= DMA_PTE_LARGE_PAGE;
2004                                 /* Ensure that old small page tables are removed to make room
2005                                    for superpage, if they exist. */
2006                                 dma_pte_clear_range(domain, iov_pfn,
2007                                                     iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2008                                 dma_pte_free_pagetable(domain, iov_pfn,
2009                                                        iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2010                         } else {
2011                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2012                         }
2013
2014                 }
2015                 /* We don't need a lock here; nobody else
2016                  * touches the iova range
2017                  */
2018                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2019                 if (tmp) {
2020                         static int dumps = 5;
2021                         printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2022                                iov_pfn, tmp, (unsigned long long)pteval);
2023                         if (dumps) {
2024                                 dumps--;
2025                                 debug_dma_dump_mappings(NULL);
2026                         }
2027                         WARN_ON(1);
2028                 }
2029
2030                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2031
2032                 BUG_ON(nr_pages < lvl_pages);
2033                 BUG_ON(sg_res < lvl_pages);
2034
2035                 nr_pages -= lvl_pages;
2036                 iov_pfn += lvl_pages;
2037                 phys_pfn += lvl_pages;
2038                 pteval += lvl_pages * VTD_PAGE_SIZE;
2039                 sg_res -= lvl_pages;
2040
2041                 /* If the next PTE would be the first in a new page, then we
2042                    need to flush the cache on the entries we've just written.
2043                    And then we'll need to recalculate 'pte', so clear it and
2044                    let it get set again in the if (!pte) block above.
2045
2046                    If we're done (!nr_pages) we need to flush the cache too.
2047
2048                    Also if we've been setting superpages, we may need to
2049                    recalculate 'pte' and switch back to smaller pages for the
2050                    end of the mapping, if the trailing size is not enough to
2051                    use another superpage (i.e. sg_res < lvl_pages). */
2052                 pte++;
2053                 if (!nr_pages || first_pte_in_page(pte) ||
2054                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2055                         domain_flush_cache(domain, first_pte,
2056                                            (void *)pte - (void *)first_pte);
2057                         pte = NULL;
2058                 }
2059
2060                 if (!sg_res && nr_pages)
2061                         sg = sg_next(sg);
2062         }
2063         return 0;
2064 }
2065
2066 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2067                                     struct scatterlist *sg, unsigned long nr_pages,
2068                                     int prot)
2069 {
2070         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2071 }
2072
2073 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2074                                      unsigned long phys_pfn, unsigned long nr_pages,
2075                                      int prot)
2076 {
2077         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2078 }
2079
2080 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
2081 {
2082         if (!iommu)
2083                 return;
2084
2085         clear_context_table(iommu, bus, devfn);
2086         iommu->flush.flush_context(iommu, 0, 0, 0,
2087                                            DMA_CCMD_GLOBAL_INVL);
2088         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2089 }
2090
2091 static inline void unlink_domain_info(struct device_domain_info *info)
2092 {
2093         assert_spin_locked(&device_domain_lock);
2094         list_del(&info->link);
2095         list_del(&info->global);
2096         if (info->dev)
2097                 info->dev->archdata.iommu = NULL;
2098 }
2099
2100 static void domain_remove_dev_info(struct dmar_domain *domain)
2101 {
2102         struct device_domain_info *info;
2103         unsigned long flags, flags2;
2104
2105         spin_lock_irqsave(&device_domain_lock, flags);
2106         while (!list_empty(&domain->devices)) {
2107                 info = list_entry(domain->devices.next,
2108                         struct device_domain_info, link);
2109                 unlink_domain_info(info);
2110                 spin_unlock_irqrestore(&device_domain_lock, flags);
2111
2112                 iommu_disable_dev_iotlb(info);
2113                 iommu_detach_dev(info->iommu, info->bus, info->devfn);
2114
2115                 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
2116                         iommu_detach_dependent_devices(info->iommu, info->dev);
2117                         /* clear this iommu in iommu_bmp, update iommu count
2118                          * and capabilities
2119                          */
2120                         spin_lock_irqsave(&domain->iommu_lock, flags2);
2121                         if (test_and_clear_bit(info->iommu->seq_id,
2122                                                domain->iommu_bmp)) {
2123                                 domain->iommu_count--;
2124                                 domain_update_iommu_cap(domain);
2125                         }
2126                         spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2127                 }
2128
2129                 free_devinfo_mem(info);
2130                 spin_lock_irqsave(&device_domain_lock, flags);
2131         }
2132         spin_unlock_irqrestore(&device_domain_lock, flags);
2133 }
2134
2135 /*
2136  * find_domain
2137  * Note: we use struct device->archdata.iommu to store the domain info
2138  */
2139 static struct dmar_domain *find_domain(struct device *dev)
2140 {
2141         struct device_domain_info *info;
2142
2143         /* No lock here, assumes no domain exit in normal case */
2144         info = dev->archdata.iommu;
2145         if (info)
2146                 return info->domain;
2147         return NULL;
2148 }
2149
2150 static inline struct device_domain_info *
2151 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2152 {
2153         struct device_domain_info *info;
2154
2155         list_for_each_entry(info, &device_domain_list, global)
2156                 if (info->iommu->segment == segment && info->bus == bus &&
2157                     info->devfn == devfn)
2158                         return info;
2159
2160         return NULL;
2161 }
2162
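/*
 * Record that (@bus, @devfn) / @dev belongs to @domain.  If the device
 * already has a domain (e.g. another thread won the race), that existing
 * domain is returned instead and the caller must free the one it passed
 * in.
 */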
2163 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
2164                                                 int bus, int devfn,
2165                                                 struct device *dev,
2166                                                 struct dmar_domain *domain)
2167 {
2168         struct dmar_domain *found = NULL;
2169         struct device_domain_info *info;
2170         unsigned long flags;
2171
2172         info = alloc_devinfo_mem();
2173         if (!info)
2174                 return NULL;
2175
2176         info->bus = bus;
2177         info->devfn = devfn;
2178         info->dev = dev;
2179         info->domain = domain;
2180         info->iommu = iommu;
2181         if (!dev)
2182                 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2183
2184         spin_lock_irqsave(&device_domain_lock, flags);
2185         if (dev)
2186                 found = find_domain(dev);
2187         else {
2188                 struct device_domain_info *info2;
2189                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2190                 if (info2)
2191                         found = info2->domain;
2192         }
2193         if (found) {
2194                 spin_unlock_irqrestore(&device_domain_lock, flags);
2195                 free_devinfo_mem(info);
2196                 /* Caller must free the original domain */
2197                 return found;
2198         }
2199
2200         list_add(&info->link, &domain->devices);
2201         list_add(&info->global, &device_domain_list);
2202         if (dev)
2203                 dev->archdata.iommu = info;
2204         spin_unlock_irqrestore(&device_domain_lock, flags);
2205
2206         return domain;
2207 }
2208
2209 /* domain is initialized */
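/*
 * Find the domain for @dev, allocating and initializing a new one if
 * necessary.  A device behind a PCIe-to-PCI bridge reuses the domain
 * already registered for that bridge, so that all devices sharing the
 * bridge's source-id end up in the same domain.
 */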
2210 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2211 {
2212         struct dmar_domain *domain, *free = NULL;
2213         struct intel_iommu *iommu = NULL;
2214         struct device_domain_info *info;
2215         struct pci_dev *dev_tmp = NULL;
2216         unsigned long flags;
2217         u8 bus, devfn, bridge_bus, bridge_devfn;
2218
2219         domain = find_domain(dev);
2220         if (domain)
2221                 return domain;
2222
2223         if (dev_is_pci(dev)) {
2224                 struct pci_dev *pdev = to_pci_dev(dev);
2225                 u16 segment;
2226
2227                 segment = pci_domain_nr(pdev->bus);
2228                 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
2229                 if (dev_tmp) {
2230                         if (pci_is_pcie(dev_tmp)) {
2231                                 bridge_bus = dev_tmp->subordinate->number;
2232                                 bridge_devfn = 0;
2233                         } else {
2234                                 bridge_bus = dev_tmp->bus->number;
2235                                 bridge_devfn = dev_tmp->devfn;
2236                         }
2237                         spin_lock_irqsave(&device_domain_lock, flags);
2238                         info = dmar_search_domain_by_dev_info(segment, bridge_bus, bridge_devfn);
2239                         if (info) {
2240                                 iommu = info->iommu;
2241                                 domain = info->domain;
2242                         }
2243                         spin_unlock_irqrestore(&device_domain_lock, flags);
2244                         /* pcie-pci bridge already has a domain, use it */
2245                         if (info)
2246                                 goto found_domain;
2247                 }
2248         }
2249
2250         iommu = device_to_iommu(dev, &bus, &devfn);
2251         if (!iommu)
2252                 goto error;
2253
2254         /* Allocate and initialize new domain for the device */
2255         domain = alloc_domain(false);
2256         if (!domain)
2257                 goto error;
2258         if (iommu_attach_domain(domain, iommu)) {
2259                 free_domain_mem(domain);
2260                 goto error;
2261         }
2262         free = domain;
2263         if (domain_init(domain, gaw))
2264                 goto error;
2265
2266         /* register the PCIe-to-PCI bridge so devices behind it can share the domain */
2267         if (dev_tmp) {
2268                 domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn,
2269                                               NULL, domain);
2270                 if (!domain)
2271                         goto error;
2272         }
2273
2274 found_domain:
2275         domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2276 error:
2277         if (free != domain)
2278                 domain_exit(free);
2279
2280         return domain;
2281 }
2282
2283 static int iommu_identity_mapping;
2284 #define IDENTMAP_ALL            1
2285 #define IDENTMAP_GFX            2
2286 #define IDENTMAP_AZALIA         4
2287
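/*
 * Create a 1:1 mapping for [start, end] in @domain: reserve the IOVA
 * range so the allocator never hands it out, clear any stale PTEs (an
 * RMRR may overlap ordinary RAM) and then map every pfn onto itself.
 */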
2288 static int iommu_domain_identity_map(struct dmar_domain *domain,
2289                                      unsigned long long start,
2290                                      unsigned long long end)
2291 {
2292         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2293         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2294
2295         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2296                           dma_to_mm_pfn(last_vpfn))) {
2297                 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2298                 return -ENOMEM;
2299         }
2300
2301         pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2302                  start, end, domain->id);
2303         /*
2304          * The RMRR range might overlap with the physical memory range,
2305          * clear it first
2306          */
2307         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2308
2309         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2310                                   last_vpfn - first_vpfn + 1,
2311                                   DMA_PTE_READ|DMA_PTE_WRITE);
2312 }
2313
2314 static int iommu_prepare_identity_map(struct device *dev,
2315                                       unsigned long long start,
2316                                       unsigned long long end)
2317 {
2318         struct dmar_domain *domain;
2319         int ret;
2320
2321         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2322         if (!domain)
2323                 return -ENOMEM;
2324
2325         /* For _hardware_ passthrough, don't bother. But for software
2326            passthrough, we do it anyway -- it may indicate a memory
2327            range which is reserved in E820 and so didn't get set
2328            up to start with in si_domain */
2329         if (domain == si_domain && hw_pass_through) {
2330                 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2331                        dev_name(dev), start, end);
2332                 return 0;
2333         }
2334
2335         printk(KERN_INFO
2336                "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2337                dev_name(dev), start, end);
2338
2339         if (end < start) {
2340                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2341                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2342                         dmi_get_system_info(DMI_BIOS_VENDOR),
2343                         dmi_get_system_info(DMI_BIOS_VERSION),
2344                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2345                 ret = -EIO;
2346                 goto error;
2347         }
2348
2349         if (end >> agaw_to_width(domain->agaw)) {
2350                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2351                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2352                      agaw_to_width(domain->agaw),
2353                      dmi_get_system_info(DMI_BIOS_VENDOR),
2354                      dmi_get_system_info(DMI_BIOS_VERSION),
2355                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2356                 ret = -EIO;
2357                 goto error;
2358         }
2359
2360         ret = iommu_domain_identity_map(domain, start, end);
2361         if (ret)
2362                 goto error;
2363
2364         /* context entry init */
2365         ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2366         if (ret)
2367                 goto error;
2368
2369         return 0;
2370
2371  error:
2372         domain_exit(domain);
2373         return ret;
2374 }
2375
2376 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2377                                          struct device *dev)
2378 {
2379         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2380                 return 0;
2381         return iommu_prepare_identity_map(dev, rmrr->base_address,
2382                                           rmrr->end_address);
2383 }
2384
2385 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2386 static inline void iommu_prepare_isa(void)
2387 {
2388         struct pci_dev *pdev;
2389         int ret;
2390
2391         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2392         if (!pdev)
2393                 return;
2394
2395         printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2396         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2397
2398         if (ret)
2399                 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2400                        "floppy might not work\n");
2401
2402 }
2403 #else
2404 static inline void iommu_prepare_isa(void)
2405 {
2406         return;
2407 }
2408 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2409
2410 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2411
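/*
 * Build the static identity (si) domain used for pass-through: attach it
 * to every active IOMMU and, unless hardware pass-through is in use,
 * install 1:1 mappings for all usable memory on every online node.
 */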
2412 static int __init si_domain_init(int hw)
2413 {
2414         struct dmar_drhd_unit *drhd;
2415         struct intel_iommu *iommu;
2416         int nid, ret = 0;
2417
2418         si_domain = alloc_domain(false);
2419         if (!si_domain)
2420                 return -EFAULT;
2421
2422         si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2423
2424         for_each_active_iommu(iommu, drhd) {
2425                 ret = iommu_attach_domain(si_domain, iommu);
2426                 if (ret) {
2427                         domain_exit(si_domain);
2428                         return -EFAULT;
2429                 }
2430         }
2431
2432         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2433                 domain_exit(si_domain);
2434                 return -EFAULT;
2435         }
2436
2437         pr_debug("IOMMU: identity mapping domain is domain %d\n",
2438                  si_domain->id);
2439
2440         if (hw)
2441                 return 0;
2442
2443         for_each_online_node(nid) {
2444                 unsigned long start_pfn, end_pfn;
2445                 int i;
2446
2447                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2448                         ret = iommu_domain_identity_map(si_domain,
2449                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2450                         if (ret)
2451                                 return ret;
2452                 }
2453         }
2454
2455         return 0;
2456 }
2457
2458 static int identity_mapping(struct device *dev)
2459 {
2460         struct device_domain_info *info;
2461
2462         if (likely(!iommu_identity_mapping))
2463                 return 0;
2464
2465         info = dev->archdata.iommu;
2466         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2467                 return (info->domain == si_domain);
2468
2469         return 0;
2470 }
2471
2472 static int domain_add_dev_info(struct dmar_domain *domain,
2473                                struct pci_dev *pdev,
2474                                int translation)
2475 {
2476         struct dmar_domain *ndomain;
2477         struct intel_iommu *iommu;
2478         u8 bus, devfn;
2479         int ret;
2480
2481         iommu = device_to_iommu(&pdev->dev, &bus, &devfn);
2482         if (!iommu)
2483                 return -ENODEV;
2484
2485         ndomain = dmar_insert_dev_info(iommu, bus, devfn, &pdev->dev, domain);
2486         if (ndomain != domain)
2487                 return -EBUSY;
2488
2489         ret = domain_context_mapping(domain, &pdev->dev, translation);
2490         if (ret) {
2491                 domain_remove_one_dev_info(domain, pdev);
2492                 return ret;
2493         }
2494
2495         return 0;
2496 }
2497
2498 static bool device_has_rmrr(struct device *dev)
2499 {
2500         struct dmar_rmrr_unit *rmrr;
2501         struct device *tmp;
2502         int i;
2503
2504         rcu_read_lock();
2505         for_each_rmrr_units(rmrr) {
2506                 /*
2507                  * Return TRUE if this RMRR contains the device that
2508                  * is passed in.
2509                  */
2510                 for_each_active_dev_scope(rmrr->devices,
2511                                           rmrr->devices_cnt, i, tmp)
2512                         if (tmp == dev) {
2513                                 rcu_read_unlock();
2514                                 return true;
2515                         }
2516         }
2517         rcu_read_unlock();
2518         return false;
2519 }
2520
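/*
 * Decide whether @dev should live in the identity (1:1) domain.  Devices
 * with RMRRs (other than USB controllers), PCI devices behind bridges
 * that share a source-id and, once the system is up, devices whose DMA
 * mask cannot reach all of memory are kept out of it.
 */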
2521 static int iommu_should_identity_map(struct device *dev, int startup)
2522 {
2523
2524         if (dev_is_pci(dev)) {
2525                 struct pci_dev *pdev = to_pci_dev(dev);
2526
2527                 /*
2528                  * We want to prevent any device associated with an RMRR from
2529                  * getting placed into the SI Domain. This is done because
2530                  * problems exist when devices are moved in and out of domains
2531                  * and their respective RMRR info is lost. We exempt USB devices
2532                  * from this process due to their usage of RMRRs that are known
2533                  * to not be needed after BIOS hand-off to OS.
2534                  */
2535                 if (device_has_rmrr(dev) &&
2536                     (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2537                         return 0;
2538
2539                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2540                         return 1;
2541
2542                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2543                         return 1;
2544
2545                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2546                         return 0;
2547
2548                 /*
2549                  * We want to start off with all devices in the 1:1 domain, and
2550                  * take them out later if we find they can't access all of memory.
2551                  *
2552                  * However, we can't do this for PCI devices behind bridges,
2553                  * because all PCI devices behind the same bridge will end up
2554                  * with the same source-id on their transactions.
2555                  *
2556                  * Practically speaking, we can't change things around for these
2557                  * devices at run-time, because we can't be sure there'll be no
2558                  * DMA transactions in flight for any of their siblings.
2559                  *
2560                  * So PCI devices (unless they're on the root bus) as well as
2561                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2562                  * the 1:1 domain, just in _case_ one of their siblings turns out
2563                  * not to be able to map all of memory.
2564                  */
2565                 if (!pci_is_pcie(pdev)) {
2566                         if (!pci_is_root_bus(pdev->bus))
2567                                 return 0;
2568                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2569                                 return 0;
2570                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2571                         return 0;
2572         } else {
2573                 if (device_has_rmrr(dev))
2574                         return 0;
2575         }
2576
2577         /*
2578          * At boot time, we don't yet know if devices will be 64-bit capable.
2579          * Assume that they will — if they turn out not to be, then we can
2580          * take them out of the 1:1 domain later.
2581          */
2582         if (!startup) {
2583                 /*
2584                  * If the device's dma_mask is less than the system's memory
2585                  * size then this is not a candidate for identity mapping.
2586                  */
2587                 u64 dma_mask = *dev->dma_mask;
2588
2589                 if (dev->coherent_dma_mask &&
2590                     dev->coherent_dma_mask < dma_mask)
2591                         dma_mask = dev->coherent_dma_mask;
2592
2593                 return dma_mask >= dma_get_required_mask(dev);
2594         }
2595
2596         return 1;
2597 }
2598
2599 static int __init iommu_prepare_static_identity_mapping(int hw)
2600 {
2601         struct pci_dev *pdev = NULL;
2602         int ret;
2603
2604         ret = si_domain_init(hw);
2605         if (ret)
2606                 return -EFAULT;
2607
2608         for_each_pci_dev(pdev) {
2609                 if (iommu_should_identity_map(&pdev->dev, 1)) {
2610                         ret = domain_add_dev_info(si_domain, pdev,
2611                                              hw ? CONTEXT_TT_PASS_THROUGH :
2612                                                   CONTEXT_TT_MULTI_LEVEL);
2613                         if (ret) {
2614                                 /* device not associated with an iommu */
2615                                 if (ret == -ENODEV)
2616                                         continue;
2617                                 return ret;
2618                         }
2619                         pr_info("IOMMU: %s identity mapping for device %s\n",
2620                                 hw ? "hardware" : "software", pci_name(pdev));
2621                 }
2622         }
2623
2624         return 0;
2625 }
2626
2627 static int __init init_dmars(void)
2628 {
2629         struct dmar_drhd_unit *drhd;
2630         struct dmar_rmrr_unit *rmrr;
2631         struct device *dev;
2632         struct intel_iommu *iommu;
2633         int i, ret;
2634
2635         /*
2636          * for each drhd
2637          *    allocate root
2638          *    initialize and program root entry to not present
2639          * endfor
2640          */
2641         for_each_drhd_unit(drhd) {
2642                 /*
2643                  * lock not needed as this is only incremented in the single
2644                  * threaded kernel __init code path; all other accesses are
2645                  * read only
2646                  */
2647                 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2648                         g_num_of_iommus++;
2649                         continue;
2650                 }
2651                 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2652                           IOMMU_UNITS_SUPPORTED);
2653         }
2654
2655         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2656                         GFP_KERNEL);
2657         if (!g_iommus) {
2658                 printk(KERN_ERR "Allocating global iommu array failed\n");
2659                 ret = -ENOMEM;
2660                 goto error;
2661         }
2662
2663         deferred_flush = kzalloc(g_num_of_iommus *
2664                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2665         if (!deferred_flush) {
2666                 ret = -ENOMEM;
2667                 goto free_g_iommus;
2668         }
2669
2670         for_each_active_iommu(iommu, drhd) {
2671                 g_iommus[iommu->seq_id] = iommu;
2672
2673                 ret = iommu_init_domains(iommu);
2674                 if (ret)
2675                         goto free_iommu;
2676
2677                 /*
2678                  * TBD:
2679                  * we could share the same root & context tables
2680                  * among all IOMMUs. Need to split it later.
2681                  */
2682                 ret = iommu_alloc_root_entry(iommu);
2683                 if (ret) {
2684                         printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2685                         goto free_iommu;
2686                 }
2687                 if (!ecap_pass_through(iommu->ecap))
2688                         hw_pass_through = 0;
2689         }
2690
2691         /*
2692          * Start from a sane iommu hardware state.
2693          */
2694         for_each_active_iommu(iommu, drhd) {
2695                 /*
2696                  * If the queued invalidation is already initialized by us
2697                  * (for example, while enabling interrupt-remapping) then
2698                  * we already have things rolling from a sane state.
2699                  */
2700                 if (iommu->qi)
2701                         continue;
2702
2703                 /*
2704                  * Clear any previous faults.
2705                  */
2706                 dmar_fault(-1, iommu);
2707                 /*
2708                  * Disable queued invalidation if supported and already enabled
2709                  * before OS handover.
2710                  */
2711                 dmar_disable_qi(iommu);
2712         }
2713
2714         for_each_active_iommu(iommu, drhd) {
2715                 if (dmar_enable_qi(iommu)) {
2716                         /*
2717                          * Queued Invalidate not enabled, use Register Based
2718                          * Invalidate
2719                          */
2720                         iommu->flush.flush_context = __iommu_flush_context;
2721                         iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2722                         printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2723                                "invalidation\n",
2724                                 iommu->seq_id,
2725                                (unsigned long long)drhd->reg_base_addr);
2726                 } else {
2727                         iommu->flush.flush_context = qi_flush_context;
2728                         iommu->flush.flush_iotlb = qi_flush_iotlb;
2729                         printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2730                                "invalidation\n",
2731                                 iommu->seq_id,
2732                                (unsigned long long)drhd->reg_base_addr);
2733                 }
2734         }
2735
2736         if (iommu_pass_through)
2737                 iommu_identity_mapping |= IDENTMAP_ALL;
2738
2739 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2740         iommu_identity_mapping |= IDENTMAP_GFX;
2741 #endif
2742
2743         check_tylersburg_isoch();
2744
2745         /*
2746          * If identity mapping is requested, set up the static identity domain
2747          * now and add the devices that should be identity mapped, using
2748          * hardware pass-through where available and 1:1 mappings otherwise.
2749          */
2750         if (iommu_identity_mapping) {
2751                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2752                 if (ret) {
2753                         printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2754                         goto free_iommu;
2755                 }
2756         }
2757         /*
2758          * For each rmrr
2759          *   for each dev attached to rmrr
2760          *   do
2761          *     locate drhd for dev, alloc domain for dev
2762          *     allocate free domain
2763          *     allocate page table entries for rmrr
2764          *     if context not allocated for bus
2765          *           allocate and init context
2766          *           set present in root table for this bus
2767          *     init context with domain, translation etc
2768          *    endfor
2769          * endfor
2770          */
2771         printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2772         for_each_rmrr_units(rmrr) {
2773                 /* some BIOSes list non-existent devices in the DMAR table. */
2774                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2775                                           i, dev) {
2776                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
2777                         if (ret)
2778                                 printk(KERN_ERR
2779                                        "IOMMU: mapping reserved region failed\n");
2780                 }
2781         }
2782
2783         iommu_prepare_isa();
2784
2785         /*
2786          * for each drhd
2787          *   enable fault log
2788          *   global invalidate context cache
2789          *   global invalidate iotlb
2790          *   enable translation
2791          */
2792         for_each_iommu(iommu, drhd) {
2793                 if (drhd->ignored) {
2794                         /*
2795                          * we always have to disable PMRs or DMA may fail on
2796                          * this device
2797                          */
2798                         if (force_on)
2799                                 iommu_disable_protect_mem_regions(iommu);
2800                         continue;
2801                 }
2802
2803                 iommu_flush_write_buffer(iommu);
2804
2805                 ret = dmar_set_interrupt(iommu);
2806                 if (ret)
2807                         goto free_iommu;
2808
2809                 iommu_set_root_entry(iommu);
2810
2811                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2812                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2813
2814                 ret = iommu_enable_translation(iommu);
2815                 if (ret)
2816                         goto free_iommu;
2817
2818                 iommu_disable_protect_mem_regions(iommu);
2819         }
2820
2821         return 0;
2822
2823 free_iommu:
2824         for_each_active_iommu(iommu, drhd)
2825                 free_dmar_iommu(iommu);
2826         kfree(deferred_flush);
2827 free_g_iommus:
2828         kfree(g_iommus);
2829 error:
2830         return ret;
2831 }
2832
2833 /* This takes a number of _MM_ pages, not VTD pages */
2834 static struct iova *intel_alloc_iova(struct device *dev,
2835                                      struct dmar_domain *domain,
2836                                      unsigned long nrpages, uint64_t dma_mask)
2837 {
2838         struct pci_dev *pdev = to_pci_dev(dev);
2839         struct iova *iova = NULL;
2840
2841         /* Restrict dma_mask to the width that the iommu can handle */
2842         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2843
2844         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2845                 /*
2846                  * First try to allocate an io virtual address in
2847                  * DMA_BIT_MASK(32) and if that fails then try allocating
2848                  * from higher range
2849                  */
2850                 iova = alloc_iova(&domain->iovad, nrpages,
2851                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
2852                 if (iova)
2853                         return iova;
2854         }
2855         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2856         if (unlikely(!iova)) {
2857                 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2858                        nrpages, pci_name(pdev));
2859                 return NULL;
2860         }
2861
2862         return iova;
2863 }
2864
2865 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
2866 {
2867         struct dmar_domain *domain;
2868         int ret;
2869
2870         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2871         if (!domain) {
2872                 printk(KERN_ERR "Allocating domain for %s failed",
2873                        dev_name(dev));
2874                 return NULL;
2875         }
2876
2877         /* make sure context mapping is ok */
2878         if (unlikely(!domain_context_mapped(dev))) {
2879                 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2880                 if (ret) {
2881                         printk(KERN_ERR "Domain context map for %s failed",
2882                                dev_name(dev));
2883                         return NULL;
2884                 }
2885         }
2886
2887         return domain;
2888 }
2889
2890 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
2891 {
2892         struct device_domain_info *info;
2893
2894         /* No lock here, assumes no domain exit in normal case */
2895         info = dev->archdata.iommu;
2896         if (likely(info))
2897                 return info->domain;
2898
2899         return __get_valid_domain_for_dev(dev);
2900 }
2901
2902 static int iommu_dummy(struct device *dev)
2903 {
2904         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2905 }
2906
2907 /* Check if the device needs to go through the non-identity map and unmap process. */
2908 static int iommu_no_mapping(struct device *dev)
2909 {
2910         struct pci_dev *pdev;
2911         int found;
2912
2913         if (unlikely(!dev_is_pci(dev)))
2914                 return 1;
2915
2916         if (iommu_dummy(dev))
2917                 return 1;
2918
2919         if (!iommu_identity_mapping)
2920                 return 0;
2921
2922         pdev = to_pci_dev(dev);
2923         found = identity_mapping(dev);
2924         if (found) {
2925                 if (iommu_should_identity_map(&pdev->dev, 0))
2926                         return 1;
2927                 else {
2928                         /*
2929                          * A 32 bit DMA device is removed from si_domain and
2930                          * falls back to non-identity mapping.
2931                          */
2932                         domain_remove_one_dev_info(si_domain, pdev);
2933                         printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2934                                pci_name(pdev));
2935                         return 0;
2936                 }
2937         } else {
2938                 /*
2939                  * In case a 64 bit DMA device is detached from a VM, the
2940                  * device is put into si_domain for identity mapping.
2941                  */
2942                 if (iommu_should_identity_map(&pdev->dev, 0)) {
2943                         int ret;
2944                         ret = domain_add_dev_info(si_domain, pdev,
2945                                                   hw_pass_through ?
2946                                                   CONTEXT_TT_PASS_THROUGH :
2947                                                   CONTEXT_TT_MULTI_LEVEL);
2948                         if (!ret) {
2949                                 printk(KERN_INFO "64bit %s uses identity mapping\n",
2950                                        pci_name(pdev));
2951                                 return 1;
2952                         }
2953                 }
2954         }
2955
2956         return 0;
2957 }
2958
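/*
 * Map a physically contiguous buffer for DMA: allocate an IOVA range
 * covering whole pages, install page-table entries with protection bits
 * derived from @dir, flush the IOTLB (caching mode) or the write buffer,
 * and return the resulting bus address.  Returns 0 on failure.
 */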
2959 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2960                                      size_t size, int dir, u64 dma_mask)
2961 {
2962         struct pci_dev *pdev = to_pci_dev(hwdev);
2963         struct dmar_domain *domain;
2964         phys_addr_t start_paddr;
2965         struct iova *iova;
2966         int prot = 0;
2967         int ret;
2968         struct intel_iommu *iommu;
2969         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2970
2971         BUG_ON(dir == DMA_NONE);
2972
2973         if (iommu_no_mapping(hwdev))
2974                 return paddr;
2975
2976         domain = get_valid_domain_for_dev(hwdev);
2977         if (!domain)
2978                 return 0;
2979
2980         iommu = domain_get_iommu(domain);
2981         size = aligned_nrpages(paddr, size);
2982
2983         iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2984         if (!iova)
2985                 goto error;
2986
2987         /*
2988          * Check if DMAR supports zero-length reads on write-only
2989          * mappings.
2990          */
2991         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2992                         !cap_zlr(iommu->cap))
2993                 prot |= DMA_PTE_READ;
2994         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2995                 prot |= DMA_PTE_WRITE;
2996         /*
2997          * The range paddr .. paddr + size may cover partial pages, so map whole
2998          * pages.  Note: if two parts of one page are mapped separately, we may
2999          * end up with two guest addresses mapping to the same host paddr, but
3000          * this is not a big problem.
3001          */
3002         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3003                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3004         if (ret)
3005                 goto error;
3006
3007         /* it's a non-present to present mapping. Only flush if caching mode */
3008         if (cap_caching_mode(iommu->cap))
3009                 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
3010         else
3011                 iommu_flush_write_buffer(iommu);
3012
3013         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3014         start_paddr += paddr & ~PAGE_MASK;
3015         return start_paddr;
3016
3017 error:
3018         if (iova)
3019                 __free_iova(&domain->iovad, iova);
3020         printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
3021                 pci_name(pdev), size, (unsigned long long)paddr, dir);
3022         return 0;
3023 }
3024
3025 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3026                                  unsigned long offset, size_t size,
3027                                  enum dma_data_direction dir,
3028                                  struct dma_attrs *attrs)
3029 {
3030         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3031                                   dir, to_pci_dev(dev)->dma_mask);
3032 }
3033
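/*
 * Drain every per-IOMMU deferred-unmap queue: invalidate the IOTLB for the
 * queued IOVA ranges (one global flush on real hardware, per-range flushes
 * in caching mode), then release the IOVAs and any freed page-table pages.
 * Runs with async_umap_flush_lock held.
 */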
3034 static void flush_unmaps(void)
3035 {
3036         int i, j;
3037
3038         timer_on = 0;
3039
3040         /* just flush them all */
3041         for (i = 0; i < g_num_of_iommus; i++) {
3042                 struct intel_iommu *iommu = g_iommus[i];
3043                 if (!iommu)
3044                         continue;
3045
3046                 if (!deferred_flush[i].next)
3047                         continue;
3048
3049                 /* In caching mode, global flushes make emulation expensive */
3050                 if (!cap_caching_mode(iommu->cap))
3051                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3052                                          DMA_TLB_GLOBAL_FLUSH);
3053                 for (j = 0; j < deferred_flush[i].next; j++) {
3054                         unsigned long mask;
3055                         struct iova *iova = deferred_flush[i].iova[j];
3056                         struct dmar_domain *domain = deferred_flush[i].domain[j];
3057
3058                         /* On real hardware multiple invalidations are expensive */
3059                         if (cap_caching_mode(iommu->cap))
3060                                 iommu_flush_iotlb_psi(iommu, domain->id,
3061                                         iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
3062                                         !deferred_flush[i].freelist[j], 0);
3063                         else {
3064                                 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
3065                                 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3066                                                 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3067                         }
3068                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3069                         if (deferred_flush[i].freelist[j])
3070                                 dma_free_pagelist(deferred_flush[i].freelist[j]);
3071                 }
3072                 deferred_flush[i].next = 0;
3073         }
3074
3075         list_size = 0;
3076 }
3077
3078 static void flush_unmaps_timeout(unsigned long data)
3079 {
3080         unsigned long flags;
3081
3082         spin_lock_irqsave(&async_umap_flush_lock, flags);
3083         flush_unmaps();
3084         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3085 }
3086
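/*
 * Queue an IOVA range (and the page-table pages freed with it) for deferred
 * invalidation on the domain's IOMMU.  The queue is drained when it reaches
 * HIGH_WATER_MARK or when the 10ms unmap_timer fires.
 */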
3087 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3088 {
3089         unsigned long flags;
3090         int next, iommu_id;
3091         struct intel_iommu *iommu;
3092
3093         spin_lock_irqsave(&async_umap_flush_lock, flags);
3094         if (list_size == HIGH_WATER_MARK)
3095                 flush_unmaps();
3096
3097         iommu = domain_get_iommu(dom);
3098         iommu_id = iommu->seq_id;
3099
3100         next = deferred_flush[iommu_id].next;
3101         deferred_flush[iommu_id].domain[next] = dom;
3102         deferred_flush[iommu_id].iova[next] = iova;
3103         deferred_flush[iommu_id].freelist[next] = freelist;
3104         deferred_flush[iommu_id].next++;
3105
3106         if (!timer_on) {
3107                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3108                 timer_on = 1;
3109         }
3110         list_size++;
3111         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3112 }
3113
3114 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3115                              size_t size, enum dma_data_direction dir,
3116                              struct dma_attrs *attrs)
3117 {
3118         struct pci_dev *pdev = to_pci_dev(dev);
3119         struct dmar_domain *domain;
3120         unsigned long start_pfn, last_pfn;
3121         struct iova *iova;
3122         struct intel_iommu *iommu;
3123         struct page *freelist;
3124
3125         if (iommu_no_mapping(dev))
3126                 return;
3127
3128         domain = find_domain(dev);
3129         BUG_ON(!domain);
3130
3131         iommu = domain_get_iommu(domain);
3132
3133         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3134         if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3135                       (unsigned long long)dev_addr))
3136                 return;
3137
3138         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3139         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3140
3141         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3142                  pci_name(pdev), start_pfn, last_pfn);
3143
3144         freelist = domain_unmap(domain, start_pfn, last_pfn);
3145
3146         if (intel_iommu_strict) {
3147                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3148                                       last_pfn - start_pfn + 1, !freelist, 0);
3149                 /* free iova */
3150                 __free_iova(&domain->iovad, iova);
3151                 dma_free_pagelist(freelist);
3152         } else {
3153                 add_unmap(domain, iova, freelist);
3154                 /*
3155                  * queue up the release of the unmap to avoid the roughly 1/6th
3156                  * of a cpu otherwise spent on a synchronous iotlb flush...
3157                  */
3158         }
3159 }
3160
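/*
 * DMA-coherent allocation: grab zeroed pages (falling back to GFP_DMA or
 * GFP_DMA32 only when the device is not translated and its coherent mask
 * requires it), then map them bidirectionally via __intel_map_single().
 */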
3161 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
3162                                   dma_addr_t *dma_handle, gfp_t flags,
3163                                   struct dma_attrs *attrs)
3164 {
3165         void *vaddr;
3166         int order;
3167
3168         size = PAGE_ALIGN(size);
3169         order = get_order(size);
3170
3171         if (!iommu_no_mapping(hwdev))
3172                 flags &= ~(GFP_DMA | GFP_DMA32);
3173         else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
3174                 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
3175                         flags |= GFP_DMA;
3176                 else
3177                         flags |= GFP_DMA32;
3178         }
3179
3180         vaddr = (void *)__get_free_pages(flags, order);
3181         if (!vaddr)
3182                 return NULL;
3183         memset(vaddr, 0, size);
3184
3185         *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
3186                                          DMA_BIDIRECTIONAL,
3187                                          hwdev->coherent_dma_mask);
3188         if (*dma_handle)
3189                 return vaddr;
3190         free_pages((unsigned long)vaddr, order);
3191         return NULL;
3192 }
3193
3194 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
3195                                 dma_addr_t dma_handle, struct dma_attrs *attrs)
3196 {
3197         int order;
3198
3199         size = PAGE_ALIGN(size);
3200         order = get_order(size);
3201
3202         intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
3203         free_pages((unsigned long)vaddr, order);
3204 }
3205
3206 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3207                            int nelems, enum dma_data_direction dir,
3208                            struct dma_attrs *attrs)
3209 {
3210         struct dmar_domain *domain;
3211         unsigned long start_pfn, last_pfn;
3212         struct iova *iova;
3213         struct intel_iommu *iommu;
3214         struct page *freelist;
3215
3216         if (iommu_no_mapping(hwdev))
3217                 return;
3218
3219         domain = find_domain(hwdev);
3220         BUG_ON(!domain);
3221
3222         iommu = domain_get_iommu(domain);
3223
3224         iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
3225         if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3226                       (unsigned long long)sglist[0].dma_address))
3227                 return;
3228
3229         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3230         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3231
3232         freelist = domain_unmap(domain, start_pfn, last_pfn);
3233
3234         if (intel_iommu_strict) {
3235                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3236                                       last_pfn - start_pfn + 1, !freelist, 0);
3237                 /* free iova */
3238                 __free_iova(&domain->iovad, iova);
3239                 dma_free_pagelist(freelist);
3240         } else {
3241                 add_unmap(domain, iova, freelist);
3242                 /*
3243                  * queue up the release of the unmap to avoid the roughly 1/6th
3244                  * of a cpu otherwise spent on a synchronous iotlb flush...
3245                  */
3246         }
3247 }
3248
3249 static int intel_nontranslate_map_sg(struct device *hddev,
3250         struct scatterlist *sglist, int nelems, int dir)
3251 {
3252         int i;
3253         struct scatterlist *sg;
3254
3255         for_each_sg(sglist, sg, nelems, i) {
3256                 BUG_ON(!sg_page(sg));
3257                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3258                 sg->dma_length = sg->length;
3259         }
3260         return nelems;
3261 }
3262
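/*
 * Map a scatterlist: allocate one IOVA range covering the page-aligned total
 * length, map every element into it contiguously, and flush as in
 * __intel_map_single().  Returns the number of elements mapped, or 0 on
 * failure.
 */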
3263 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3264                         enum dma_data_direction dir, struct dma_attrs *attrs)
3265 {
3266         int i;
3267         struct pci_dev *pdev = to_pci_dev(hwdev);
3268         struct dmar_domain *domain;
3269         size_t size = 0;
3270         int prot = 0;
3271         struct iova *iova = NULL;
3272         int ret;
3273         struct scatterlist *sg;
3274         unsigned long start_vpfn;
3275         struct intel_iommu *iommu;
3276
3277         BUG_ON(dir == DMA_NONE);
3278         if (iommu_no_mapping(hwdev))
3279                 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
3280
3281         domain = get_valid_domain_for_dev(hwdev);
3282         if (!domain)
3283                 return 0;
3284
3285         iommu = domain_get_iommu(domain);
3286
3287         for_each_sg(sglist, sg, nelems, i)
3288                 size += aligned_nrpages(sg->offset, sg->length);
3289
3290         iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3291                                 pdev->dma_mask);
3292         if (!iova) {
3293                 sglist->dma_length = 0;
3294                 return 0;
3295         }
3296
3297         /*
3298          * Check if DMAR supports zero-length reads on write-only
3299          * mappings.
3300          */
3301         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3302                         !cap_zlr(iommu->cap))
3303                 prot |= DMA_PTE_READ;
3304         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3305                 prot |= DMA_PTE_WRITE;
3306
3307         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3308
3309         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3310         if (unlikely(ret)) {
3311                 /*  clear the page */
3312                 dma_pte_clear_range(domain, start_vpfn,
3313                                     start_vpfn + size - 1);
3314                 /* free page tables */
3315                 dma_pte_free_pagetable(domain, start_vpfn,
3316                                        start_vpfn + size - 1);
3317                 /* free iova */
3318                 __free_iova(&domain->iovad, iova);
3319                 return 0;
3320         }
3321
3322         /* it's a non-present to present mapping. Only flush if caching mode */
3323         if (cap_caching_mode(iommu->cap))
3324                 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
3325         else
3326                 iommu_flush_write_buffer(iommu);
3327
3328         return nelems;
3329 }
3330
3331 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3332 {
3333         return !dma_addr;
3334 }
3335
3336 struct dma_map_ops intel_dma_ops = {
3337         .alloc = intel_alloc_coherent,
3338         .free = intel_free_coherent,
3339         .map_sg = intel_map_sg,
3340         .unmap_sg = intel_unmap_sg,
3341         .map_page = intel_map_page,
3342         .unmap_page = intel_unmap_page,
3343         .mapping_error = intel_mapping_error,
3344 };
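/*
 * Illustrative only: once dma_ops points at intel_dma_ops, the generic DMA
 * API routes here, e.g. dma_map_single()/dma_unmap_single() end up in
 * intel_map_page()/intel_unmap_page(), and dma_alloc_coherent() in
 * intel_alloc_coherent().
 */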
3345
3346 static inline int iommu_domain_cache_init(void)
3347 {
3348         int ret = 0;
3349
3350         iommu_domain_cache = kmem_cache_create("iommu_domain",
3351                                          sizeof(struct dmar_domain),
3352                                          0,
3353                                          SLAB_HWCACHE_ALIGN,
3355                                          NULL);
3356         if (!iommu_domain_cache) {
3357                 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3358                 ret = -ENOMEM;
3359         }
3360
3361         return ret;
3362 }
3363
3364 static inline int iommu_devinfo_cache_init(void)
3365 {
3366         int ret = 0;
3367
3368         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3369                                          sizeof(struct device_domain_info),
3370                                          0,
3371                                          SLAB_HWCACHE_ALIGN,
3372                                          NULL);
3373         if (!iommu_devinfo_cache) {
3374                 printk(KERN_ERR "Couldn't create devinfo cache\n");
3375                 ret = -ENOMEM;
3376         }
3377
3378         return ret;
3379 }
3380
3381 static inline int iommu_iova_cache_init(void)
3382 {
3383         int ret = 0;
3384
3385         iommu_iova_cache = kmem_cache_create("iommu_iova",
3386                                          sizeof(struct iova),
3387                                          0,
3388                                          SLAB_HWCACHE_ALIGN,
3389                                          NULL);
3390         if (!iommu_iova_cache) {
3391                 printk(KERN_ERR "Couldn't create iova cache\n");
3392                 ret = -ENOMEM;
3393         }
3394
3395         return ret;
3396 }
3397
3398 static int __init iommu_init_mempool(void)
3399 {
3400         int ret;
3401         ret = iommu_iova_cache_init();
3402         if (ret)
3403                 return ret;
3404
3405         ret = iommu_domain_cache_init();
3406         if (ret)
3407                 goto domain_error;
3408
3409         ret = iommu_devinfo_cache_init();
3410         if (!ret)
3411                 return ret;
3412
3413         kmem_cache_destroy(iommu_domain_cache);
3414 domain_error:
3415         kmem_cache_destroy(iommu_iova_cache);
3416
3417         return -ENOMEM;
3418 }
3419
3420 static void __init iommu_exit_mempool(void)
3421 {
3422         kmem_cache_destroy(iommu_devinfo_cache);
3423         kmem_cache_destroy(iommu_domain_cache);
3424         kmem_cache_destroy(iommu_iova_cache);
3425
3426 }
3427
3428 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3429 {
3430         struct dmar_drhd_unit *drhd;
3431         u32 vtbar;
3432         int rc;
3433
3434         /* We know that this device on this chipset has its own IOMMU.
3435          * If we find it under a different IOMMU, then the BIOS is lying
3436          * to us. Hope that the IOMMU for this device is actually
3437          * disabled, and it needs no translation...
3438          */
3439         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3440         if (rc) {
3441                 /* "can't" happen */
3442                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3443                 return;
3444         }
3445         vtbar &= 0xffff0000;
3446
3447         /* we know that this iommu should be at offset 0xa000 from vtbar */
3448         drhd = dmar_find_matched_drhd_unit(pdev);
3449         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3450                             TAINT_FIRMWARE_WORKAROUND,
3451                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3452                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3453 }
3454 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3455
3456 static void __init init_no_remapping_devices(void)
3457 {
3458         struct dmar_drhd_unit *drhd;
3459         struct device *dev;
3460         int i;
3461
3462         for_each_drhd_unit(drhd) {
3463                 if (!drhd->include_all) {
3464                         for_each_active_dev_scope(drhd->devices,
3465                                                   drhd->devices_cnt, i, dev)
3466                                 break;
3467                         /* ignore DMAR unit if no devices exist */
3468                         if (i == drhd->devices_cnt)
3469                                 drhd->ignored = 1;
3470                 }
3471         }
3472
3473         for_each_active_drhd_unit(drhd) {
3474                 if (drhd->include_all)
3475                         continue;
3476
3477                 for_each_active_dev_scope(drhd->devices,
3478                                           drhd->devices_cnt, i, dev)
3479                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3480                                 break;
3481                 if (i < drhd->devices_cnt)
3482                         continue;
3483
3484                 /* This IOMMU has *only* gfx devices. Either bypass it or
3485                    set the gfx_mapped flag, as appropriate */
3486                 if (dmar_map_gfx) {
3487                         intel_iommu_gfx_mapped = 1;
3488                 } else {
3489                         drhd->ignored = 1;
3490                         for_each_active_dev_scope(drhd->devices,
3491                                                   drhd->devices_cnt, i, dev)
3492                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3493                 }
3494         }
3495 }
3496
3497 #ifdef CONFIG_SUSPEND
3498 static int init_iommu_hw(void)
3499 {
3500         struct dmar_drhd_unit *drhd;
3501         struct intel_iommu *iommu = NULL;
3502
3503         for_each_active_iommu(iommu, drhd)
3504                 if (iommu->qi)
3505                         dmar_reenable_qi(iommu);
3506
3507         for_each_iommu(iommu, drhd) {
3508                 if (drhd->ignored) {
3509                         /*
3510                          * we always have to disable PMRs or DMA may fail on
3511                          * this device
3512                          */
3513                         if (force_on)
3514                                 iommu_disable_protect_mem_regions(iommu);
3515                         continue;
3516                 }
3517
3518                 iommu_flush_write_buffer(iommu);
3519
3520                 iommu_set_root_entry(iommu);
3521
3522                 iommu->flush.flush_context(iommu, 0, 0, 0,
3523                                            DMA_CCMD_GLOBAL_INVL);
3524                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3525                                          DMA_TLB_GLOBAL_FLUSH);
3526                 if (iommu_enable_translation(iommu))
3527                         return 1;
3528                 iommu_disable_protect_mem_regions(iommu);
3529         }
3530
3531         return 0;
3532 }
3533
3534 static void iommu_flush_all(void)
3535 {
3536         struct dmar_drhd_unit *drhd;
3537         struct intel_iommu *iommu;
3538
3539         for_each_active_iommu(iommu, drhd) {
3540                 iommu->flush.flush_context(iommu, 0, 0, 0,
3541                                            DMA_CCMD_GLOBAL_INVL);
3542                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3543                                          DMA_TLB_GLOBAL_FLUSH);
3544         }
3545 }
3546
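/*
 * On suspend: flush all caches, disable translation, and save the fault
 * event control/data/address registers of each active IOMMU so that
 * iommu_resume() can restore them.
 */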
3547 static int iommu_suspend(void)
3548 {
3549         struct dmar_drhd_unit *drhd;
3550         struct intel_iommu *iommu = NULL;
3551         unsigned long flag;
3552
3553         for_each_active_iommu(iommu, drhd) {
3554                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3555                                                  GFP_ATOMIC);
3556                 if (!iommu->iommu_state)
3557                         goto nomem;
3558         }
3559
3560         iommu_flush_all();
3561
3562         for_each_active_iommu(iommu, drhd) {
3563                 iommu_disable_translation(iommu);
3564
3565                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3566
3567                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3568                         readl(iommu->reg + DMAR_FECTL_REG);
3569                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3570                         readl(iommu->reg + DMAR_FEDATA_REG);
3571                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3572                         readl(iommu->reg + DMAR_FEADDR_REG);
3573                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3574                         readl(iommu->reg + DMAR_FEUADDR_REG);
3575
3576                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3577         }
3578         return 0;
3579
3580 nomem:
3581         for_each_active_iommu(iommu, drhd)
3582                 kfree(iommu->iommu_state);
3583
3584         return -ENOMEM;
3585 }
3586
3587 static void iommu_resume(void)
3588 {
3589         struct dmar_drhd_unit *drhd;
3590         struct intel_iommu *iommu = NULL;
3591         unsigned long flag;
3592
3593         if (init_iommu_hw()) {
3594                 if (force_on)
3595                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3596                 else
3597                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3598                 return;
3599         }
3600
3601         for_each_active_iommu(iommu, drhd) {
3602
3603                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3604
3605                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3606                         iommu->reg + DMAR_FECTL_REG);
3607                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3608                         iommu->reg + DMAR_FEDATA_REG);
3609                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3610                         iommu->reg + DMAR_FEADDR_REG);
3611                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3612                         iommu->reg + DMAR_FEUADDR_REG);
3613
3614                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3615         }
3616
3617         for_each_active_iommu(iommu, drhd)
3618                 kfree(iommu->iommu_state);
3619 }
3620
3621 static struct syscore_ops iommu_syscore_ops = {
3622         .resume         = iommu_resume,
3623         .suspend        = iommu_suspend,
3624 };
3625
3626 static void __init init_iommu_pm_ops(void)
3627 {
3628         register_syscore_ops(&iommu_syscore_ops);
3629 }
3630
3631 #else
3632 static inline void init_iommu_pm_ops(void) {}
3633 #endif  /* CONFIG_SUSPEND */
3634
3635
3636 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3637 {
3638         struct acpi_dmar_reserved_memory *rmrr;
3639         struct dmar_rmrr_unit *rmrru;
3640
3641         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3642         if (!rmrru)
3643                 return -ENOMEM;
3644
3645         rmrru->hdr = header;
3646         rmrr = (struct acpi_dmar_reserved_memory *)header;
3647         rmrru->base_address = rmrr->base_address;
3648         rmrru->end_address = rmrr->end_address;
3649         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3650                                 ((void *)rmrr) + rmrr->header.length,
3651                                 &rmrru->devices_cnt);
3652         if (rmrru->devices_cnt && rmrru->devices == NULL) {
3653                 kfree(rmrru);
3654                 return -ENOMEM;
3655         }
3656
3657         list_add(&rmrru->list, &dmar_rmrr_units);
3658
3659         return 0;
3660 }
3661
3662 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3663 {
3664         struct acpi_dmar_atsr *atsr;
3665         struct dmar_atsr_unit *atsru;
3666
3667         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3668         atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3669         if (!atsru)
3670                 return -ENOMEM;
3671
3672         atsru->hdr = hdr;
3673         atsru->include_all = atsr->flags & 0x1;
3674         if (!atsru->include_all) {
3675                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3676                                 (void *)atsr + atsr->header.length,
3677                                 &atsru->devices_cnt);
3678                 if (atsru->devices_cnt && atsru->devices == NULL) {
3679                         kfree(atsru);
3680                         return -ENOMEM;
3681                 }
3682         }
3683
3684         list_add_rcu(&atsru->list, &dmar_atsr_units);
3685
3686         return 0;
3687 }
3688
3689 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3690 {
3691         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3692         kfree(atsru);
3693 }
3694
3695 static void intel_iommu_free_dmars(void)
3696 {
3697         struct dmar_rmrr_unit *rmrru, *rmrr_n;
3698         struct dmar_atsr_unit *atsru, *atsr_n;
3699
3700         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3701                 list_del(&rmrru->list);
3702                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3703                 kfree(rmrru);
3704         }
3705
3706         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3707                 list_del(&atsru->list);
3708                 intel_iommu_free_atsr(atsru);
3709         }
3710 }
3711
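/*
 * Walk up from @dev to its PCIe root port and report whether any ATSR unit
 * on the same segment covers that root port (or is marked include_all).
 */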
3712 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3713 {
3714         int i, ret = 1;
3715         struct pci_bus *bus;
3716         struct pci_dev *bridge = NULL;
3717         struct device *tmp;
3718         struct acpi_dmar_atsr *atsr;
3719         struct dmar_atsr_unit *atsru;
3720
3721         dev = pci_physfn(dev);
3722         for (bus = dev->bus; bus; bus = bus->parent) {
3723                 bridge = bus->self;
3724                 if (!bridge || !pci_is_pcie(bridge) ||
3725                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3726                         return 0;
3727                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3728                         break;
3729         }
3730         if (!bridge)
3731                 return 0;
3732
3733         rcu_read_lock();
3734         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3735                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3736                 if (atsr->segment != pci_domain_nr(dev->bus))
3737                         continue;
3738
3739                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3740                         if (tmp == &bridge->dev)
3741                                 goto out;
3742
3743                 if (atsru->include_all)
3744                         goto out;
3745         }
3746         ret = 0;
3747 out:
3748         rcu_read_unlock();
3749
3750         return ret;
3751 }
3752
3753 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3754 {
3755         int ret = 0;
3756         struct dmar_rmrr_unit *rmrru;
3757         struct dmar_atsr_unit *atsru;
3758         struct acpi_dmar_atsr *atsr;
3759         struct acpi_dmar_reserved_memory *rmrr;
3760
3761         if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3762                 return 0;
3763
3764         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3765                 rmrr = container_of(rmrru->hdr,
3766                                     struct acpi_dmar_reserved_memory, header);
3767                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3768                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3769                                 ((void *)rmrr) + rmrr->header.length,
3770                                 rmrr->segment, rmrru->devices,
3771                                 rmrru->devices_cnt);
3772                         if (ret > 0)
3773                                 break;
3774                         else if (ret < 0)
3775                                 return ret;
3776                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3777                         if (dmar_remove_dev_scope(info, rmrr->segment,
3778                                 rmrru->devices, rmrru->devices_cnt))
3779                                 break;
3780                 }
3781         }
3782
3783         list_for_each_entry(atsru, &dmar_atsr_units, list) {
3784                 if (atsru->include_all)
3785                         continue;
3786
3787                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3788                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3789                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3790                                         (void *)atsr + atsr->header.length,
3791                                         atsr->segment, atsru->devices,
3792                                         atsru->devices_cnt);
3793                         if (ret > 0)
3794                                 break;
3795                         else if (ret < 0)
3796                                 return ret;
3797                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3798                         if (dmar_remove_dev_scope(info, atsr->segment,
3799                                         atsru->devices, atsru->devices_cnt))
3800                                 break;
3801                 }
3802         }
3803
3804         return 0;
3805 }
3806
3807 /*
3808  * Here we only respond to a device being unbound from its driver.
3809  *
3810  * A newly added device is not attached to its DMAR domain here yet; that
3811  * happens when the device is first mapped to an iova.
3812  */
3813 static int device_notifier(struct notifier_block *nb,
3814                                   unsigned long action, void *data)
3815 {
3816         struct device *dev = data;
3817         struct pci_dev *pdev = to_pci_dev(dev);
3818         struct dmar_domain *domain;
3819
3820         if (iommu_dummy(dev))
3821                 return 0;
3822
3823         if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
3824             action != BUS_NOTIFY_DEL_DEVICE)
3825                 return 0;
3826
3827         domain = find_domain(dev);
3828         if (!domain)
3829                 return 0;
3830
3831         down_read(&dmar_global_lock);
3832         domain_remove_one_dev_info(domain, pdev);
3833         if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3834             !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3835             list_empty(&domain->devices))
3836                 domain_exit(domain);
3837         up_read(&dmar_global_lock);
3838
3839         return 0;
3840 }
3841
3842 static struct notifier_block device_nb = {
3843         .notifier_call = device_notifier,
3844 };
3845
3846 static int intel_iommu_memory_notifier(struct notifier_block *nb,
3847                                        unsigned long val, void *v)
3848 {
3849         struct memory_notify *mhp = v;
3850         unsigned long long start, end;
3851         unsigned long start_vpfn, last_vpfn;
3852
3853         switch (val) {
3854         case MEM_GOING_ONLINE:
3855                 start = mhp->start_pfn << PAGE_SHIFT;
3856                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3857                 if (iommu_domain_identity_map(si_domain, start, end)) {
3858                         pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3859                                 start, end);
3860                         return NOTIFY_BAD;
3861                 }
3862                 break;
3863
3864         case MEM_OFFLINE:
3865         case MEM_CANCEL_ONLINE:
3866                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3867                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3868                 while (start_vpfn <= last_vpfn) {
3869                         struct iova *iova;
3870                         struct dmar_drhd_unit *drhd;
3871                         struct intel_iommu *iommu;
3872                         struct page *freelist;
3873
3874                         iova = find_iova(&si_domain->iovad, start_vpfn);
3875                         if (iova == NULL) {
3876                                 pr_debug("dmar: failed to get IOVA for PFN %lx\n",
3877                                          start_vpfn);
3878                                 break;
3879                         }
3880
3881                         iova = split_and_remove_iova(&si_domain->iovad, iova,
3882                                                      start_vpfn, last_vpfn);
3883                         if (iova == NULL) {
3884                                 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3885                                         start_vpfn, last_vpfn);
3886                                 return NOTIFY_BAD;
3887                         }
3888
3889                         freelist = domain_unmap(si_domain, iova->pfn_lo,
3890                                                iova->pfn_hi);
3891
3892                         rcu_read_lock();
3893                         for_each_active_iommu(iommu, drhd)
3894                                 iommu_flush_iotlb_psi(iommu, si_domain->id,
3895                                         iova->pfn_lo,
3896                                         iova->pfn_hi - iova->pfn_lo + 1,
3897                                         !freelist, 0);
3898                         rcu_read_unlock();
3899                         dma_free_pagelist(freelist);
3900
3901                         start_vpfn = iova->pfn_hi + 1;
3902                         free_iova_mem(iova);
3903                 }
3904                 break;
3905         }
3906
3907         return NOTIFY_OK;
3908 }
3909
3910 static struct notifier_block intel_iommu_memory_nb = {
3911         .notifier_call = intel_iommu_memory_notifier,
3912         .priority = 0
3913 };
3914
3915 int __init intel_iommu_init(void)
3916 {
3917         int ret = -ENODEV;
3918         struct dmar_drhd_unit *drhd;
3919         struct intel_iommu *iommu;
3920
3921         /* VT-d is required for a TXT/tboot launch, so enforce that */
3922         force_on = tboot_force_iommu();
3923
3924         if (iommu_init_mempool()) {
3925                 if (force_on)
3926                         panic("tboot: Failed to initialize iommu memory\n");
3927                 return -ENOMEM;
3928         }
3929
3930         down_write(&dmar_global_lock);
3931         if (dmar_table_init()) {
3932                 if (force_on)
3933                         panic("tboot: Failed to initialize DMAR table\n");
3934                 goto out_free_dmar;
3935         }
3936
3937         /*
3938          * Disable translation if already enabled prior to OS handover.
3939          */
3940         for_each_active_iommu(iommu, drhd)
3941                 if (iommu->gcmd & DMA_GCMD_TE)
3942                         iommu_disable_translation(iommu);
3943
3944         if (dmar_dev_scope_init() < 0) {
3945                 if (force_on)
3946                         panic("tboot: Failed to initialize DMAR device scope\n");
3947                 goto out_free_dmar;
3948         }
3949
3950         if (no_iommu || dmar_disabled)
3951                 goto out_free_dmar;
3952
3953         if (list_empty(&dmar_rmrr_units))
3954                 printk(KERN_INFO "DMAR: No RMRR found\n");
3955
3956         if (list_empty(&dmar_atsr_units))
3957                 printk(KERN_INFO "DMAR: No ATSR found\n");
3958
3959         if (dmar_init_reserved_ranges()) {
3960                 if (force_on)
3961                         panic("tboot: Failed to reserve iommu ranges\n");
3962                 goto out_free_reserved_range;
3963         }
3964
3965         init_no_remapping_devices();
3966
3967         ret = init_dmars();
3968         if (ret) {
3969                 if (force_on)
3970                         panic("tboot: Failed to initialize DMARs\n");
3971                 printk(KERN_ERR "IOMMU: dmar init failed\n");
3972                 goto out_free_reserved_range;
3973         }
3974         up_write(&dmar_global_lock);
3975         printk(KERN_INFO
3976         "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3977
3978         init_timer(&unmap_timer);
3979 #ifdef CONFIG_SWIOTLB
3980         swiotlb = 0;
3981 #endif
3982         dma_ops = &intel_dma_ops;
3983
3984         init_iommu_pm_ops();
3985
3986         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
3987         bus_register_notifier(&pci_bus_type, &device_nb);
3988         if (si_domain && !hw_pass_through)
3989                 register_memory_notifier(&intel_iommu_memory_nb);
3990
3991         intel_iommu_enabled = 1;
3992
3993         return 0;
3994
3995 out_free_reserved_range:
3996         put_iova_domain(&reserved_iova_list);
3997 out_free_dmar:
3998         intel_iommu_free_dmars();
3999         up_write(&dmar_global_lock);
4000         iommu_exit_mempool();
4001         return ret;
4002 }
4003
4004 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
4005                                            struct device *dev)
4006 {
4007         struct pci_dev *tmp, *parent, *pdev;
4008
4009         if (!iommu || !dev || !dev_is_pci(dev))
4010                 return;
4011
4012         pdev = to_pci_dev(dev);
4013
4014         /* dependent device detach */
4015         tmp = pci_find_upstream_pcie_bridge(pdev);
4016         /* Secondary interface's bus number and devfn 0 */
4017         if (tmp) {
4018                 parent = pdev->bus->self;
4019                 while (parent != tmp) {
4020                         iommu_detach_dev(iommu, parent->bus->number,
4021                                          parent->devfn);
4022                         parent = parent->bus->self;
4023                 }
4024                 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
4025                         iommu_detach_dev(iommu,
4026                                 tmp->subordinate->number, 0);
4027                 else /* this is a legacy PCI bridge */
4028                         iommu_detach_dev(iommu, tmp->bus->number,
4029                                          tmp->devfn);
4030         }
4031 }
4032
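/*
 * Detach one PCI device from @domain: remove its device_domain_info, tear
 * down its context entry (and those of any bridges it sits behind), and, if
 * it was the last device on that IOMMU, drop the IOMMU from the domain.
 */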
4033 static void domain_remove_one_dev_info(struct dmar_domain *domain,
4034                                           struct pci_dev *pdev)
4035 {
4036         struct device_domain_info *info, *tmp;
4037         struct intel_iommu *iommu;
4038         unsigned long flags;
4039         int found = 0;
4040         u8 bus, devfn;
4041
4042         iommu = device_to_iommu(&pdev->dev, &bus, &devfn);
4043         if (!iommu)
4044                 return;
4045
4046         spin_lock_irqsave(&device_domain_lock, flags);
4047         list_for_each_entry_safe(info, tmp, &domain->devices, link) {
4048                 if (info->iommu->segment == pci_domain_nr(pdev->bus) &&
4049                     info->bus == pdev->bus->number &&
4050                     info->devfn == pdev->devfn) {
4051                         unlink_domain_info(info);
4052                         spin_unlock_irqrestore(&device_domain_lock, flags);
4053
4054                         iommu_disable_dev_iotlb(info);
4055                         iommu_detach_dev(iommu, info->bus, info->devfn);
4056                         iommu_detach_dependent_devices(iommu, &pdev->dev);
4057                         free_devinfo_mem(info);
4058
4059                         spin_lock_irqsave(&device_domain_lock, flags);
4060
4061                         if (found)
4062                                 break;
4063                         else
4064                                 continue;
4065                 }
4066
4067                 /* if there are no other devices under the same iommu
4068                  * owned by this domain, clear this iommu from iommu_bmp and
4069                  * update the iommu count and coherency
4070                  */
4071                 if (info->iommu == iommu)
4072                         found = 1;
4073         }
4074
4075         spin_unlock_irqrestore(&device_domain_lock, flags);
4076
4077         if (found == 0) {
4078                 unsigned long tmp_flags;
4079                 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
4080                 clear_bit(iommu->seq_id, domain->iommu_bmp);
4081                 domain->iommu_count--;
4082                 domain_update_iommu_cap(domain);
4083                 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
4084
4085                 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
4086                     !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
4087                         spin_lock_irqsave(&iommu->lock, tmp_flags);
4088                         clear_bit(domain->id, iommu->domain_ids);
4089                         iommu->domains[domain->id] = NULL;
4090                         spin_unlock_irqrestore(&iommu->lock, tmp_flags);
4091                 }
4092         }
4093 }
4094
4095 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4096 {
4097         int adjust_width;
4098
4099         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
4100         domain_reserve_special_ranges(domain);
4101
4102         /* calculate AGAW */
4103         domain->gaw = guest_width;
4104         adjust_width = guestwidth_to_adjustwidth(guest_width);
4105         domain->agaw = width_to_agaw(adjust_width);
4106
4107         domain->iommu_coherency = 0;
4108         domain->iommu_snooping = 0;
4109         domain->iommu_superpage = 0;
4110         domain->max_addr = 0;
4111         domain->nid = -1;
4112
4113         /* always allocate the top pgd */
4114         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4115         if (!domain->pgd)
4116                 return -ENOMEM;
4117         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4118         return 0;
4119 }
4120
4121 static int intel_iommu_domain_init(struct iommu_domain *domain)
4122 {
4123         struct dmar_domain *dmar_domain;
4124
4125         dmar_domain = alloc_domain(true);
4126         if (!dmar_domain) {
4127                 printk(KERN_ERR
4128                         "intel_iommu_domain_init: dmar_domain == NULL\n");
4129                 return -ENOMEM;
4130         }
4131         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4132                 printk(KERN_ERR
4133                         "intel_iommu_domain_init() failed\n");
4134                 domain_exit(dmar_domain);
4135                 return -ENOMEM;
4136         }
4137         domain_update_iommu_cap(dmar_domain);
4138         domain->priv = dmar_domain;
4139
4140         domain->geometry.aperture_start = 0;
4141         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4142         domain->geometry.force_aperture = true;
4143
4144         return 0;
4145 }
4146
4147 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
4148 {
4149         struct dmar_domain *dmar_domain = domain->priv;
4150
4151         domain->priv = NULL;
4152         domain_exit(dmar_domain);
4153 }
4154
4155 static int intel_iommu_attach_device(struct iommu_domain *domain,
4156                                      struct device *dev)
4157 {
4158         struct dmar_domain *dmar_domain = domain->priv;
4159         struct pci_dev *pdev = to_pci_dev(dev);
4160         struct intel_iommu *iommu;
4161         int addr_width;
4162         u8 bus, devfn;
4163
4164         /* normally pdev is not mapped */
4165         if (unlikely(domain_context_mapped(&pdev->dev))) {
4166                 struct dmar_domain *old_domain;
4167
4168                 old_domain = find_domain(dev);
4169                 if (old_domain) {
4170                         if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
4171                             dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
4172                                 domain_remove_one_dev_info(old_domain, pdev);
4173                         else
4174                                 domain_remove_dev_info(old_domain);
4175                 }
4176         }
4177
4178         iommu = device_to_iommu(dev, &bus, &devfn);
4179         if (!iommu)
4180                 return -ENODEV;
4181
4182         /* check if this iommu agaw is sufficient for max mapped address */
4183         addr_width = agaw_to_width(iommu->agaw);
4184         if (addr_width > cap_mgaw(iommu->cap))
4185                 addr_width = cap_mgaw(iommu->cap);
4186
4187         if (dmar_domain->max_addr > (1LL << addr_width)) {
4188                 printk(KERN_ERR "%s: iommu width (%d) is not "
4189                        "sufficient for the mapped address (%llx)\n",
4190                        __func__, addr_width, dmar_domain->max_addr);
4191                 return -EFAULT;
4192         }
4193         dmar_domain->gaw = addr_width;
4194
4195         /*
4196          * Knock out extra levels of page tables if necessary
4197          */
4198         while (iommu->agaw < dmar_domain->agaw) {
4199                 struct dma_pte *pte;
4200
4201                 pte = dmar_domain->pgd;
4202                 if (dma_pte_present(pte)) {
4203                         dmar_domain->pgd = (struct dma_pte *)
4204                                 phys_to_virt(dma_pte_addr(pte));
4205                         free_pgtable_page(pte);
4206                 }
4207                 dmar_domain->agaw--;
4208         }
4209
4210         return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
4211 }
4212
4213 static void intel_iommu_detach_device(struct iommu_domain *domain,
4214                                       struct device *dev)
4215 {
4216         struct dmar_domain *dmar_domain = domain->priv;
4217         struct pci_dev *pdev = to_pci_dev(dev);
4218
4219         domain_remove_one_dev_info(dmar_domain, pdev);
4220 }
4221
4222 static int intel_iommu_map(struct iommu_domain *domain,
4223                            unsigned long iova, phys_addr_t hpa,
4224                            size_t size, int iommu_prot)
4225 {
4226         struct dmar_domain *dmar_domain = domain->priv;
4227         u64 max_addr;
4228         int prot = 0;
4229         int ret;
4230
4231         if (iommu_prot & IOMMU_READ)
4232                 prot |= DMA_PTE_READ;
4233         if (iommu_prot & IOMMU_WRITE)
4234                 prot |= DMA_PTE_WRITE;
4235         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4236                 prot |= DMA_PTE_SNP;
4237
4238         max_addr = iova + size;
4239         if (dmar_domain->max_addr < max_addr) {
4240                 u64 end;
4241
4242                 /* check if minimum agaw is sufficient for mapped address */
4243                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4244                 if (end < max_addr) {
4245                         printk(KERN_ERR "%s: iommu width (%d) is not "
4246                                "sufficient for the mapped address (%llx)\n",
4247                                __func__, dmar_domain->gaw, max_addr);
4248                         return -EFAULT;
4249                 }
4250                 dmar_domain->max_addr = max_addr;
4251         }
4252         /* Round up size to next multiple of PAGE_SIZE, if it and
4253            the low bits of hpa would take us onto the next page */
4254         size = aligned_nrpages(hpa, size);
4255         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4256                                  hpa >> VTD_PAGE_SHIFT, size, prot);
4257         return ret;
4258 }
4259
4260 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4261                                 unsigned long iova, size_t size)
4262 {
4263         struct dmar_domain *dmar_domain = domain->priv;
4264         struct page *freelist = NULL;
4265         struct intel_iommu *iommu;
4266         unsigned long start_pfn, last_pfn;
4267         unsigned int npages;
4268         int iommu_id, num, ndomains, level = 0;
4269
4270         /* Cope with horrid API which requires us to unmap more than the
4271            size argument if it happens to be a large-page mapping. */
4272         if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4273                 BUG();
4274
4275         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4276                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4277
4278         start_pfn = iova >> VTD_PAGE_SHIFT;
4279         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4280
4281         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4282
4283         npages = last_pfn - start_pfn + 1;
4284
4285         for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4286                iommu = g_iommus[iommu_id];
4287
4288                /*
4289                 * find bit position of dmar_domain
4290                 */
4291                ndomains = cap_ndoms(iommu->cap);
4292                for_each_set_bit(num, iommu->domain_ids, ndomains) {
4293                        if (iommu->domains[num] == dmar_domain)
4294                                iommu_flush_iotlb_psi(iommu, num, start_pfn,
4295                                                      npages, !freelist, 0);
4296                }
4297
4298         }
4299
4300         dma_free_pagelist(freelist);
4301
4302         if (dmar_domain->max_addr == iova + size)
4303                 dmar_domain->max_addr = iova;
4304
4305         return size;
4306 }
4307
4308 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4309                                             dma_addr_t iova)
4310 {
4311         struct dmar_domain *dmar_domain = domain->priv;
4312         struct dma_pte *pte;
4313         int level = 0;
4314         u64 phys = 0;
4315
4316         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4317         if (pte)
4318                 phys = dma_pte_addr(pte);
4319
4320         return phys;
4321 }
4322
4323 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4324                                       unsigned long cap)
4325 {
4326         struct dmar_domain *dmar_domain = domain->priv;
4327
4328         if (cap == IOMMU_CAP_CACHE_COHERENCY)
4329                 return dmar_domain->iommu_snooping;
4330         if (cap == IOMMU_CAP_INTR_REMAP)
4331                 return irq_remapping_enabled;
4332
4333         return 0;
4334 }
4335
4336 #define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
4337
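/*
 * Work out which device actually issues DMA on behalf of @dev (accounting
 * for upstream PCIe-to-PCI bridges, DMA source quirks, and functions or
 * buses without ACS isolation) and add @dev to that device's IOMMU group,
 * allocating a new group if none exists.
 */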
4338 static int intel_iommu_add_device(struct device *dev)
4339 {
4340         struct pci_dev *pdev = to_pci_dev(dev);
4341         struct pci_dev *bridge, *dma_pdev = NULL;
4342         struct iommu_group *group;
4343         int ret;
4344         u8 bus, devfn;
4345
4346         if (!device_to_iommu(dev, &bus, &devfn))
4347                 return -ENODEV;
4348
4349         bridge = pci_find_upstream_pcie_bridge(pdev);
4350         if (bridge) {
4351                 if (pci_is_pcie(bridge))
4352                         dma_pdev = pci_get_domain_bus_and_slot(
4353                                                 pci_domain_nr(pdev->bus),
4354                                                 bridge->subordinate->number, 0);
4355                 if (!dma_pdev)
4356                         dma_pdev = pci_dev_get(bridge);
4357         } else
4358                 dma_pdev = pci_dev_get(pdev);
4359
4360         /* Account for quirked devices */
4361         swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4362
4363         /*
4364          * If it's a multifunction device that does not support our
4365          * required ACS flags, add it to the same group as the lowest numbered
4366          * function that also does not support the required ACS flags.
4367          */
4368         if (dma_pdev->multifunction &&
4369             !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
4370                 u8 i, slot = PCI_SLOT(dma_pdev->devfn);
4371
4372                 for (i = 0; i < 8; i++) {
4373                         struct pci_dev *tmp;
4374
4375                         tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
4376                         if (!tmp)
4377                                 continue;
4378
4379                         if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
4380                                 swap_pci_ref(&dma_pdev, tmp);
4381                                 break;
4382                         }
4383                         pci_dev_put(tmp);
4384                 }
4385         }
4386
4387         /*
4388          * Devices on the root bus go through the iommu.  If that's not us,
4389          * find the next upstream device and test ACS up to the root bus.
4390          * Finding the next device may require skipping virtual buses.
4391          */
4392         while (!pci_is_root_bus(dma_pdev->bus)) {
4393                 struct pci_bus *bus = dma_pdev->bus;
4394
4395                 while (!bus->self) {
4396                         if (!pci_is_root_bus(bus))
4397                                 bus = bus->parent;
4398                         else
4399                                 goto root_bus;
4400                 }
4401
4402                 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
4403                         break;
4404
4405                 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
4406         }
4407
4408 root_bus:
4409         group = iommu_group_get(&dma_pdev->dev);
4410         pci_dev_put(dma_pdev);
4411         if (!group) {
4412                 group = iommu_group_alloc();
4413                 if (IS_ERR(group))
4414                         return PTR_ERR(group);
4415         }
4416
4417         ret = iommu_group_add_device(group, dev);
4418
4419         iommu_group_put(group);
4420         return ret;
4421 }
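
/*
 * Caller-side note (a sketch of the generic IOMMU core, not code in this
 * file): ->add_device() is not invoked by this driver itself.  Once the
 * ops are registered for the PCI bus, the core walks existing devices and
 * reacts to hot-added ones roughly as follows:
 *
 *	static int add_iommu_group(struct device *dev, void *data)
 *	{
 *		const struct iommu_ops *ops = data;
 *
 *		if (ops->add_device)
 *			return ops->add_device(dev);
 *		return 0;
 *	}
 *
 * A device for which device_to_iommu() finds no DMAR unit (the -ENODEV
 * path above) therefore simply ends up without an IOMMU group.
 */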
4422
4423 static void intel_iommu_remove_device(struct device *dev)
4424 {
4425         iommu_group_remove_device(dev);
4426 }
4427
4428 static struct iommu_ops intel_iommu_ops = {
4429         .domain_init    = intel_iommu_domain_init,
4430         .domain_destroy = intel_iommu_domain_destroy,
4431         .attach_dev     = intel_iommu_attach_device,
4432         .detach_dev     = intel_iommu_detach_device,
4433         .map            = intel_iommu_map,
4434         .unmap          = intel_iommu_unmap,
4435         .iova_to_phys   = intel_iommu_iova_to_phys,
4436         .domain_has_cap = intel_iommu_domain_has_cap,
4437         .add_device     = intel_iommu_add_device,
4438         .remove_device  = intel_iommu_remove_device,
4439         .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
4440 };
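
/*
 * Note: this ops table only takes effect once it is registered for the
 * PCI bus, which intel_iommu_init() does elsewhere in this file, roughly:
 *
 *	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 *
 * after which the IOMMU core starts calling the ->add_device() and domain
 * callbacks above.
 */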
4441
4442 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4443 {
4444         /* G4x/GM45 integrated gfx dmar support is totally busted. */
4445         printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4446         dmar_map_gfx = 0;
4447 }
4448
4449 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4450 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4451 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4452 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4453 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4454 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4455 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
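
/*
 * Background note (a sketch of the PCI core, not code from this file):
 * each DECLARE_PCI_FIXUP_HEADER() entry above registers the quirk in the
 * header fixup pass, so it runs during device enumeration, conceptually:
 *
 *	pci_fixup_device(pci_fixup_header, dev);
 *
 * which is early enough that dmar_map_gfx is already cleared by the time
 * the DMAR code decides how to handle the graphics device.
 */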
4456
4457 static void quirk_iommu_rwbf(struct pci_dev *dev)
4458 {
4459         /*
4460          * Mobile 4 Series Chipset neglects to set RWBF capability,
4461          * but needs it. Same seems to hold for the desktop versions.
4462          */
4463         printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4464         rwbf_quirk = 1;
4465 }
4466
4467 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4468 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4469 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4470 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4471 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4472 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4473 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4474
4475 #define GGC 0x52
4476 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
4477 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
4478 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
4479 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
4480 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
4481 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
4482 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
4483 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
4484
4485 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4486 {
4487         unsigned short ggc;
4488
4489         if (pci_read_config_word(dev, GGC, &ggc))
4490                 return;
4491
4492         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4493                 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4494                 dmar_map_gfx = 0;
4495         } else if (dmar_map_gfx) {
4496                 /* we have to ensure the gfx device is idle before we flush */
4497                 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4498                 intel_iommu_strict = 1;
4499         }
4500 }
4501 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4502 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4503 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4504 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4505
4506 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4507    ISOCH DMAR unit for the Azalia sound device, but not give it any
4508    TLB entries, which causes it to deadlock. Check for that.  We do
4509    this in a function called from init_dmars(), instead of in a PCI
4510    quirk, because we don't want to print the obnoxious "BIOS broken"
4511    message if VT-d is actually disabled.
4512 */
4513 static void __init check_tylersburg_isoch(void)
4514 {
4515         struct pci_dev *pdev;
4516         uint32_t vtisochctrl;
4517
4518         /* If there's no Azalia in the system anyway, forget it. */
4519         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4520         if (!pdev)
4521                 return;
4522         pci_dev_put(pdev);
4523
4524         /* System Management Registers. Might be hidden, in which case
4525            we can't do the sanity check. But that's OK, because the
4526            known-broken BIOSes _don't_ actually hide it, so far. */
4527         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4528         if (!pdev)
4529                 return;
4530
4531         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4532                 pci_dev_put(pdev);
4533                 return;
4534         }
4535
4536         pci_dev_put(pdev);
4537
4538         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4539         if (vtisochctrl & 1)
4540                 return;
4541
4542         /* Drop all bits other than the number of TLB entries */
4543         vtisochctrl &= 0x1c;
4544
4545         /* If we have the recommended number of TLB entries (16), fine. */
4546         if (vtisochctrl == 0x10)
4547                 return;
4548
4549         /* Zero TLB entries? You get to ride the short bus to school. */
4550         if (!vtisochctrl) {
4551                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4552                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4553                      dmi_get_system_info(DMI_BIOS_VENDOR),
4554                      dmi_get_system_info(DMI_BIOS_VERSION),
4555                      dmi_get_system_info(DMI_PRODUCT_VERSION));
4556                 iommu_identity_mapping |= IDENTMAP_AZALIA;
4557                 return;
4558         }
4559
4560         printk(KERN_WARNING "DMAR: Recommended number of TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4561                vtisochctrl);
4562 }
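
/*
 * Worked example of the checks above (the BIOS value is hypothetical):
 * a BIOS that grants the isoch unit 8 TLB entries leaves
 * vtisochctrl == 0x08 after the 0x1c mask.  That is not the recommended
 * 0x10, so the function does not return early, and it is not zero, so the
 * WARN()/IDENTMAP_AZALIA path is skipped as well; only the final
 * KERN_WARNING message fires, reporting that the BIOS set 8 entries.
 */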