/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED    0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL                0x48
#define IVHD_DEV_ACPI_HID               0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC             1
#define IVHD_SPECIAL_HPET               2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT    100000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;
/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entry structures.
 */
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 efr_attr;

        /* Following only valid on IVHD type 11h and 40h */
        u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
        u64 res;
} __attribute__((packed));
/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
        u32 hidh;
        u64 cid;
        u8 uidf;
        u8 uidl;
        u8 uid;
} __attribute__((packed));
/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                           to handle */
LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                           we find in ACPI */
bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                           system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;
/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;
/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * Pointer to a device table to which the content of the old device table
 * will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;
/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;
static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */
enum iommu_init_state {
        IOMMU_START_STATE,
        IOMMU_IVRS_DETECTED,
        IOMMU_ACPI_FINISHED,
        IOMMU_ENABLED,
        IOMMU_PCI_INIT,
        IOMMU_INTERRUPTS_EN,
        IOMMU_DMA_OPS,
        IOMMU_INITIALIZED,
        IOMMU_NOT_FOUND,
        IOMMU_INIT_ERROR,
        IOMMU_CMDLINE_DISABLED,
};
/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE          4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool __initdata amd_iommu_pre_enabled = true;
bool translation_pre_enabled(struct amd_iommu *iommu)
{
        return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
        iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        if (ctrl & (1<<CONTROL_IOMMU_EN))
                iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}
static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}
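/*
 * Worked example (illustrative): assuming the worst-case
 * amd_iommu_last_bdf of 0xffff and a 32-byte device table entry,
 * (0xffff + 1) * 32 = 2MB; with 4K pages get_order(2MB) = 9, so
 * tbl_size() yields 1UL << (12 + 9) = 2MB. Table sizes are thus always
 * rounded up to a power-of-two number of pages.
 */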
int amd_iommu_get_num_iommus(void)
{
        return amd_iommus_present;
}
/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
        pci_read_config_dword(iommu->dev, 0xfc, &val);
        return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
        pci_write_config_dword(iommu->dev, 0xfc, val);
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf0, address);
        pci_read_config_dword(iommu->dev, 0xf4, &val);
        return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
        pci_write_config_dword(iommu->dev, 0xf4, val);
}
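/*
 * Note on the helpers above (illustrative): both register spaces use an
 * indirect access protocol in PCI config space. The register address is
 * written to an address port (0xf8 for l1, 0xf0 for l2), optionally with a
 * write-enable bit set (bit 31 for l1, bit 8 for l2), and the data is then
 * transferred through the matching data port (0xfc resp. 0xf4). A sketch of
 * a read-modify-write using them, as done by the ATS workaround below:
 *
 *      u32 val = iommu_read_l2(iommu, 0x47);
 *      iommu_write_l2(iommu, 0x47, val | BIT(0));
 */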
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/
/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                    &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                    &entry, sizeof(entry));
}
/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                    &entry, sizeof(entry));
}
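/*
 * Illustrative note: the hardware expects the device table size to be
 * encoded in the low bits of the base address register as "number of 4K
 * pages minus one". Assuming a 2MB table, (dev_table_size >> 12) - 1 = 511
 * is OR'ed into the 4K-aligned physical base address written above.
 */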
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~CTRL_INV_TO_MASK;
        ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
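/*
 * Illustrative note: the invalidation timeout is a small encoded field in
 * the control register rather than a time value. CTRL_INV_TO_1S selects the
 * encoding the hardware interprets as a one second completion timeout;
 * passing a raw millisecond count here would be wrong.
 */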
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU GA_LOG */
        iommu_feature_disable(iommu, CONTROL_GALOG_EN);
        iommu_feature_disable(iommu, CONTROL_GAINT_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
        if (!request_mem_region(address, end, "amd_iommu")) {
                pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
                       address, end);
                pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
                return NULL;
        }

        return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}
static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
        u32 size = 0;

        switch (h->type) {
        case 0x10:
                size = 24;
                break;
        case 0x11:
        case 0x40:
                size = 40;
                break;
        }
        return size;
}
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/
/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
        u32 type = ((struct ivhd_entry *)ivhd)->type;

        if (type < 0x80) {
                return 0x04 << (*ivhd >> 6);
        } else if (type == IVHD_DEV_ACPI_HID) {
                /* For ACPI_HID, offset 21 is uid len */
                return *((u8 *)ivhd + 21) + 22;
        }
        return 0;
}
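/*
 * Worked example (illustrative): for the legacy encoding the two top bits
 * of the first byte select the entry size, so a DEV_SELECT entry
 * (type 0x02) yields 0x04 << (0x02 >> 6) = 4 bytes while a DEV_ALIAS entry
 * (type 0x42) yields 0x04 << (0x42 >> 6) = 8 bytes. Variable sized
 * ACPI_HID entries instead carry their uid length at offset 21.
 */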
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        u32 ivhd_size = get_ivhd_header_size(h);

        if (!ivhd_size) {
                pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
                return -EINVAL;
        }

        p += ivhd_size;
        end += h->length;

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_ALL:
                        /* Use maximum BDF value for DEV_ALL */
                        update_last_devid(0xffff);
                        break;
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        return 0;
}
static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table;

        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0) {
                /* ACPI table corrupt */
                pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
                return -ENODEV;
        }

        return 0;
}
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                if (h->type == amd_iommu_target_ivhd_type) {
                        int ret = find_last_devid_from_ivhd(h);

                        if (ret)
                                return ret;
                }
                p += h->length;
        }

        return 0;
}
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/
/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
        iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(CMD_BUFFER_SIZE));

        return iommu->cmd_buf ? 0 : -ENOMEM;
}
/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        iommu->cmd_buf_head = 0;
        iommu->cmd_buf_tail = 0;

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = (u64)virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
}
/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(EVT_BUFFER_SIZE));

        return iommu->evt_buf ? 0 : -ENOMEM;
}
static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}
/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
/* allocates the memory where the IOMMU will place the PPR log entries */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
        iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(PPR_LOG_SIZE));

        return iommu->ppr_log ? 0 : -ENOMEM;
}
static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (iommu->ppr_log == NULL)
                return;

        entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
        iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
        if (iommu->ppr_log == NULL)
                return;

        free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        if (iommu->ga_log)
                free_pages((unsigned long)iommu->ga_log,
                           get_order(GA_LOG_SIZE));
        if (iommu->ga_log_tail)
                free_pages((unsigned long)iommu->ga_log_tail,
                           get_order(8));
#endif
}
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        u32 status, i;

        if (!iommu->ga_log)
                return -EINVAL;

        status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

        /* Check if already running */
        if (status & (MMIO_STATUS_GALOG_RUN_MASK))
                return 0;

        iommu_feature_enable(iommu, CONTROL_GAINT_EN);
        iommu_feature_enable(iommu, CONTROL_GALOG_EN);

        for (i = 0; i < LOOP_TIMEOUT; ++i) {
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
                if (status & (MMIO_STATUS_GALOG_RUN_MASK))
                        break;
        }

        if (i >= LOOP_TIMEOUT)
                return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */

        return 0;
}
#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                return 0;

        iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                               get_order(GA_LOG_SIZE));
        if (!iommu->ga_log)
                goto err_out;

        iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                    get_order(8));
        if (!iommu->ga_log_tail)
                goto err_out;

        entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
                    &entry, sizeof(entry));
        entry = ((u64)virt_to_phys(iommu->ga_log_tail) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
                    &entry, sizeof(entry));
        writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

        return 0;
err_out:
        free_ga_log(iommu);
        return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */
static int iommu_init_ga(struct amd_iommu *iommu)
{
        int ret = 0;

#ifdef CONFIG_IRQ_REMAP
        /* Note: We have already checked GASup from IVRS table.
         * Now, we need to make sure that GAMSup is set.
         */
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
            !iommu_feature(iommu, FEATURE_GAM_VAPIC))
                amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

        ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

        return ret;
}
static void iommu_enable_gt(struct amd_iommu *iommu)
{
        if (!iommu_feature(iommu, FEATURE_GT))
                return;

        iommu_feature_enable(iommu, CONTROL_GT_EN);
}
/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
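/*
 * Illustrative note: the two helpers above address a device table entry as
 * four 64-bit words, so a flat bit number selects word bit/64 and bit
 * position bit%64 within it. For example bit 0 lands in data[0] bit 0,
 * while bit 96 would land in data[1] bit 32.
 */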
static bool copy_device_table(void)
{
        struct dev_table_entry *old_devtb = NULL;
        u32 lo, hi, devid, old_devtb_size;
        phys_addr_t old_devtb_phys;
        u64 entry, last_entry = 0;
        struct amd_iommu *iommu;
        u16 dom_id, dte_v;
        gfp_t gfp_flag;

        if (!amd_iommu_pre_enabled)
                return false;

        pr_warn("Translation is already enabled - trying to copy translation structures\n");
        for_each_iommu(iommu) {
                /* All IOMMUs should use the same device table with the same size */
                lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
                hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
                entry = (((u64) hi) << 32) + lo;
                if (last_entry && last_entry != entry) {
                        pr_err("IOMMU:%d should use the same dev table as others!\n",
                               iommu->index);
                        return false;
                }
                last_entry = entry;

                old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
                if (old_devtb_size != dev_table_size) {
                        pr_err("The device table size of IOMMU:%d is not expected!\n",
                               iommu->index);
                        return false;
                }
        }

        old_devtb_phys = entry & PAGE_MASK;
        old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
        if (!old_devtb)
                return false;

        gfp_flag = GFP_KERNEL | __GFP_ZERO;
        old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
                                                   get_order(dev_table_size));
        if (old_dev_tbl_cpy == NULL) {
                pr_err("Failed to allocate memory for copying old device table!\n");
                return false;
        }

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                old_dev_tbl_cpy[devid] = old_devtb[devid];
                dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
                dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

                if (dte_v && dom_id)
                        __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
        }

        memunmap(old_devtb);

        return true;
}
void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}
/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}
/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}
static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
        struct devid_map *entry;
        struct list_head *list;

        if (type == IVHD_SPECIAL_IOAPIC)
                list = &ioapic_map;
        else if (type == IVHD_SPECIAL_HPET)
                list = &hpet_map;
        else
                return -EINVAL;

        list_for_each_entry(entry, list, list) {
                if (!(entry->id == id && entry->cmd_line))
                        continue;

                pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
                        type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

                *devid = entry->devid;

                return 0;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->id       = id;
        entry->devid    = *devid;
        entry->cmd_line = cmd_line;

        list_add_tail(&entry->list, list);

        return 0;
}
static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
                                      bool cmd_line)
{
        struct acpihid_map_entry *entry;
        struct list_head *list = &acpihid_map;

        list_for_each_entry(entry, list, list) {
                if (strcmp(entry->hid, hid) ||
                    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
                    !entry->cmd_line)
                        continue;

                pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
                        hid, uid);
                *devid = entry->devid;
                return 0;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        memcpy(entry->uid, uid, strlen(uid));
        memcpy(entry->hid, hid, strlen(hid));
        entry->devid = *devid;
        entry->cmd_line = cmd_line;
        entry->root_devid = (entry->devid & (~0x7));

        pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n",
                entry->cmd_line ? "cmd" : "ivrs",
                entry->hid, entry->uid, entry->root_devid);

        list_add_tail(&entry->list, list);
        return 0;
}
static int __init add_early_maps(void)
{
        int i, ret;

        for (i = 0; i < early_ioapic_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_IOAPIC,
                                         early_ioapic_map[i].id,
                                         &early_ioapic_map[i].devid,
                                         early_ioapic_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        for (i = 0; i < early_hpet_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_HPET,
                                         early_hpet_map[i].id,
                                         &early_hpet_map[i].devid,
                                         early_hpet_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        for (i = 0; i < early_acpihid_map_size; ++i) {
                ret = add_acpi_hid_device(early_acpihid_map[i].hid,
                                          early_acpihid_map[i].uid,
                                          &early_acpihid_map[i].devid,
                                          early_acpihid_map[i].cmd_line);
                if (ret)
                        return ret;
        }

        return 0;
}
/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * We can only configure exclusion ranges per IOMMU, not
                 * per device. But we can enable the exclusion range per
                 * device. This is done here.
                 */
                set_dev_entry_bit(devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                       struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 devid = 0, devid_start = 0, devid_to = 0;
        u32 dev_i, ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;
        u32 ivhd_size;
        int ret;

        ret = add_early_maps();
        if (ret)
                return ret;

        /*
         * First save the recommended feature enable bits from ACPI
         */
        iommu->acpi_flags = h->flags;

        /*
         * Done. Now parse the device entries
         */
        ivhd_size = get_ivhd_header_size(h);
        if (!ivhd_size) {
                pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
                return -EINVAL;
        }

        p += ivhd_size;
        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

                        for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk("  DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS_NUM(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk("  DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS_NUM(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS_NUM(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                case IVHD_DEV_SPECIAL: {
                        u8 handle, type;
                        const char *var;
                        u16 devid;
                        int ret;

                        handle = e->ext & 0xff;
                        devid  = (e->ext >> 8) & 0xffff;
                        type   = (e->ext >> 24) & 0xff;

                        if (type == IVHD_SPECIAL_IOAPIC)
                                var = "IOAPIC";
                        else if (type == IVHD_SPECIAL_HPET)
                                var = "HPET";
                        else
                                var = "UNKNOWN";

                        DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
                                    var, (int)handle,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));

                        ret = add_special_device(type, handle, &devid, false);
                        if (ret)
                                return ret;

                        /*
                         * add_special_device might update the devid in case a
                         * command-line override is present. So call
                         * set_dev_entry_from_acpi after add_special_device.
                         */
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

                        break;
                }
                case IVHD_DEV_ACPI_HID: {
                        u16 devid;
                        u8 hid[ACPIHID_HID_LEN] = {0};
                        u8 uid[ACPIHID_UID_LEN] = {0};
                        int ret;

                        if (h->type != 0x40) {
                                pr_err(FW_BUG "Invalid IVHD device type %#x\n",
                                       e->type);
                                break;
                        }

                        memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
                        hid[ACPIHID_HID_LEN - 1] = '\0';

                        if (!(*hid)) {
                                pr_err(FW_BUG "Invalid HID.\n");
                                break;
                        }

                        switch (e->uidf) {
                        case UID_NOT_PRESENT:

                                if (e->uidl != 0)
                                        pr_warn(FW_BUG "Invalid UID length.\n");

                                break;
                        case UID_IS_INTEGER:

                                sprintf(uid, "%d", e->uid);

                                break;
                        case UID_IS_CHARACTER:

                                memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
                                uid[ACPIHID_UID_LEN - 1] = '\0';

                                break;
                        default:
                                break;
                        }

                        devid = e->devid;
                        DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
                                    hid, uid,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));

                        flags = e->flags;

                        ret = add_acpi_hid_device(hid, uid, &devid, false);
                        if (ret)
                                return ret;

                        /*
                         * add_special_device might update the devid in case a
                         * command-line override is present. So call
                         * set_dev_entry_from_acpi after add_special_device.
                         */
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

                        break;
                }
                default:
                        break;
                }

                p += ivhd_entry_length(p);
        }

        return 0;
}
static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        free_ppr_log(iommu);
        free_ga_log(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        for_each_iommu_safe(iommu, next) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}
/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
        u32 value;

        if ((boot_cpu_data.x86 != 0x15) ||
            (boot_cpu_data.x86_model < 0x10) ||
            (boot_cpu_data.x86_model > 0x1f))
                return;

        pci_write_config_dword(iommu->dev, 0xf0, 0x90);
        pci_read_config_dword(iommu->dev, 0xf4, &value);

        if (value & BIT(2))
                return;

        /* Select NB indirect register 0x90 and enable writing */
        pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

        pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
        pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
                dev_name(&iommu->dev->dev));

        /* Clear the enable writing bit */
        pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}
/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
        u32 value;

        if ((boot_cpu_data.x86 != 0x15) ||
            (boot_cpu_data.x86_model < 0x30) ||
            (boot_cpu_data.x86_model > 0x3f))
                return;

        /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
        value = iommu_read_l2(iommu, 0x47);

        if (value & BIT(0))
                return;

        /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
        iommu_write_l2(iommu, 0x47, value | BIT(0));

        pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
                dev_name(&iommu->dev->dev));
}
/*
 * This function glues the initialization functions for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        int ret;

        spin_lock_init(&iommu->lock);

        /* Add IOMMU to internal data structures */
        list_add_tail(&iommu->list, &amd_iommu_list);
        iommu->index = amd_iommus_present++;

        if (unlikely(iommu->index >= MAX_IOMMUS)) {
                WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
                return -ENOSYS;
        }

        /* Index is fine - add IOMMU to the array */
        amd_iommus[iommu->index] = iommu;

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->devid   = h->devid;
        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;

        switch (h->type) {
        case 0x10:
                /* Check if IVHD EFR contains proper max banks/counters */
                if ((h->efr_attr != 0) &&
                    ((h->efr_attr & (0xF << 13)) != 0) &&
                    ((h->efr_attr & (0x3F << 17)) != 0))
                        iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
                else
                        iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
                if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
                break;
        case 0x11:
        case 0x40:
                if (h->efr_reg & (1 << 9))
                        iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
                else
                        iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
                if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
                break;
        default:
                return -EINVAL;
        }

        iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
                                                iommu->mmio_phys_end);
        if (!iommu->mmio_base)
                return -ENOMEM;

        if (alloc_command_buffer(iommu))
                return -ENOMEM;

        if (alloc_event_buffer(iommu))
                return -ENOMEM;

        iommu->int_enabled = false;

        init_translation_status(iommu);
        if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
                iommu_disable(iommu);
                clear_translation_pre_enabled(iommu);
                pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
                        iommu->index);
        }
        if (amd_iommu_pre_enabled)
                amd_iommu_pre_enabled = translation_pre_enabled(iommu);

        ret = init_iommu_from_acpi(iommu, h);
        if (ret)
                return ret;

        ret = amd_iommu_create_irq_domain(iommu);
        if (ret)
                return ret;

        /*
         * Make sure IOMMU is not considered to translate itself. The IVRS
         * table tells us so, but this is a lie!
         */
        amd_iommu_rlookup_table[iommu->devid] = NULL;

        return 0;
}
/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks in the table and returns
 * the maximum supported IVHD type.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
        u8 *base = (u8 *)ivrs;
        struct ivhd_header *ivhd = (struct ivhd_header *)
                                        (base + IVRS_HEADER_LENGTH);
        u8 last_type = ivhd->type;
        u16 devid = ivhd->devid;

        while (((u8 *)ivhd - base < ivrs->length) &&
               (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
                u8 *p = (u8 *) ivhd;

                if (ivhd->devid == devid)
                        last_type = ivhd->type;
                ivhd = (struct ivhd_header *)(p + ivhd->length);
        }

        return last_type;
}
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                if (*p == amd_iommu_target_ivhd_type) {

                        DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                                    "seg: %d flags: %01x info %04x\n",
                                    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
                                    PCI_FUNC(h->devid), h->cap_ptr,
                                    h->pci_seg, h->flags, h->info);
                        DUMP_printk("       mmio-addr: %016llx\n",
                                    h->mmio_phys);

                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL)
                                return -ENOMEM;

                        ret = init_iommu_one(iommu, h);
                        if (ret)
                                return ret;
                }
                p += h->length;
        }

        return 0;
}
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
                                u8 fxn, u64 *value, bool is_write);

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
        u64 val = 0xabcd, val2 = 0;

        if (!iommu_feature(iommu, FEATURE_PC))
                return;

        amd_iommu_pc_present = true;

        /* Check if the performance counters can be written to */
        if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
            (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
            (val != val2)) {
                pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
                amd_iommu_pc_present = false;
                return;
        }

        pr_info("AMD-Vi: IOMMU performance counters supported\n");

        val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
        iommu->max_banks = (u8) ((val >> 12) & 0x3f);
        iommu->max_counters = (u8) ((val >> 7) & 0xf);
}
static ssize_t amd_iommu_show_cap(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct amd_iommu *iommu = dev_to_amd_iommu(dev);
        return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct amd_iommu *iommu = dev_to_amd_iommu(dev);
        return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
        &dev_attr_cap.attr,
        &dev_attr_features.attr,
        NULL,
};

static struct attribute_group amd_iommu_group = {
        .name = "amd-iommu",
        .attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
        &amd_iommu_group,
        NULL,
};
static int iommu_init_pci(struct amd_iommu *iommu)
{
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc, low, high;
        int ret;

        iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
                                          iommu->devid & 0xff);
        if (!iommu->dev)
                return -ENODEV;

        /* Prevent binding other PCI device drivers to IOMMU devices */
        iommu->dev->match_driver = false;

        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
                              &range);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
                              &misc);

        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
                amd_iommu_iotlb_sup = false;

        /* read extended feature bits */
        low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
        high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

        iommu->features = ((u64)high << 32) | low;

        if (iommu_feature(iommu, FEATURE_GT)) {
                int glxval;
                u32 max_pasid;
                u64 pasmax;

                pasmax = iommu->features & FEATURE_PASID_MASK;
                pasmax >>= FEATURE_PASID_SHIFT;
                max_pasid = (1 << (pasmax + 1)) - 1;

                amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

                BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
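                /*
                 * Illustrative example: a PASmax field of 15 gives
                 * max_pasid = (1 << 16) - 1 = 0xffff, i.e. 16-bit PASIDs.
                 * amd_iommu_max_pasid thus ends up as the smallest maximum
                 * supported by all IOMMUs in the system.
                 */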
                glxval = iommu->features & FEATURE_GLXVAL_MASK;
                glxval >>= FEATURE_GLXVAL_SHIFT;

                if (amd_iommu_max_glx_val == -1)
                        amd_iommu_max_glx_val = glxval;
                else
                        amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
        }

        if (iommu_feature(iommu, FEATURE_GT) &&
            iommu_feature(iommu, FEATURE_PPR)) {
                iommu->is_iommu_v2 = true;
                amd_iommu_v2_present = true;
        }

        if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
                return -ENOMEM;

        ret = iommu_init_ga(iommu);
        if (ret)
                return ret;

        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                amd_iommu_np_cache = true;

        init_iommu_perf_ctr(iommu);

        if (is_rd890_iommu(iommu->dev)) {
                int i, j;

                iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
                                                        PCI_DEVFN(0, 0));

                /*
                 * Some rd890 systems may not be fully reconfigured by the
                 * BIOS, so it's necessary for us to store this information so
                 * it can be reprogrammed on resume
                 */
                pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
                                      &iommu->stored_addr_lo);
                pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
                                      &iommu->stored_addr_hi);

                /* Low bit locks writes to configuration space */
                iommu->stored_addr_lo &= ~1;

                for (i = 0; i < 6; i++)
                        for (j = 0; j < 0x12; j++)
                                iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

                for (i = 0; i < 0x83; i++)
                        iommu->stored_l2[i] = iommu_read_l2(iommu, i);
        }

        amd_iommu_erratum_746_workaround(iommu);
        amd_iommu_ats_write_check_workaround(iommu);

        iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
                               amd_iommu_groups, "ivhd%d", iommu->index);
        iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
        iommu_device_register(&iommu->iommu);

        return pci_enable_device(iommu->dev);
}
static void print_iommu_info(void)
{
        static const char * const feat_str[] = {
                "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
                "IA", "GA", "HE", "PC"
        };
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                int i;

                pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
                        dev_name(&iommu->dev->dev), iommu->cap_ptr);

                if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
                        pr_info("AMD-Vi: Extended features (%#llx):\n",
                                iommu->features);
                        for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
                                if (iommu_feature(iommu, (1ULL << i)))
                                        pr_cont(" %s", feat_str[i]);
                        }

                        if (iommu->features & FEATURE_GAM_VAPIC)
                                pr_cont(" GA_vAPIC");

                        pr_cont("\n");
                }
        }
        if (irq_remapping_enabled) {
                pr_info("AMD-Vi: Interrupt remapping enabled\n");
                if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                        pr_info("AMD-Vi: virtual APIC enabled\n");
        }
}
static int __init amd_iommu_init_pci(void)
{
        struct amd_iommu *iommu;
        int ret = 0;

        for_each_iommu(iommu) {
                ret = iommu_init_pci(iommu);
                if (ret)
                        break;
        }

        /*
         * Order is important here to make sure any unity map requirements are
         * fulfilled. The unity mappings are created and written to the device
         * table during the amd_iommu_init_api() call.
         *
         * After that we call init_device_table_dma() to make sure any
         * uninitialized DTE will block DMA, and in the end we flush the caches
         * of all IOMMUs to make sure the changes to the device table are
         * active.
         */
        ret = amd_iommu_init_api();

        init_device_table_dma();

        for_each_iommu(iommu)
                iommu_flush_all_caches(iommu);

        if (!ret)
                print_iommu_info();

        return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/
static int iommu_setup_msi(struct amd_iommu *iommu)
{
        int r;

        r = pci_enable_msi(iommu->dev);
        if (r)
                return r;

        r = request_threaded_irq(iommu->dev->irq,
                                 amd_iommu_int_handler,
                                 amd_iommu_int_thread,
                                 0, "AMD-Vi",
                                 iommu);

        if (r) {
                pci_disable_msi(iommu->dev);
                return r;
        }

        iommu->int_enabled = true;

        return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
        int ret;

        if (iommu->int_enabled)
                goto enable_faults;

        if (iommu->dev->msi_cap)
                ret = iommu_setup_msi(iommu);
        else
                ret = -ENODEV;

        if (ret)
                return ret;

enable_faults:
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

        if (iommu->ppr_log != NULL)
                iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

        iommu_ga_log_enable(iommu);

        return 0;
}
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}
/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e = NULL;
        char *s;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
                kfree(e);
                return 0;
        case ACPI_IVMD_TYPE:
                s = "IVMD_TYPE\t\t\t";
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                s = "IVMD_TYPE_ALL\t\t";
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                s = "IVMD_TYPE_RANGE\t\t";
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;
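        /*
         * Illustrative note: shifting the IVMD flags right by one drops the
         * unity-map bit (bit 0) so that e->prot keeps just the permission
         * bits, e.g. flags of 0x07 (unity + readable + writable) would
         * yield prot = 0x3.
         */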
        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
                    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
                    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
                    e->address_start, e->address_end, m->flags);

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & IVMD_FLAG_EXCL_RANGE)
                        init_exclusion_range(m);
                else if (m->flags & IVMD_FLAG_UNITY_MAP)
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}
/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
        }
}

static void __init uninit_device_table_dma(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                amd_iommu_dev_table[devid].data[0] = 0ULL;
                amd_iommu_dev_table[devid].data[1] = 0ULL;
        }
}

static void init_device_table(void)
{
        u32 devid;

        if (!amd_iommu_irq_remap)
                return;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
                set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}
static void iommu_init_flags(struct amd_iommu *iommu)
{
        iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

        /* Set IOTLB invalidation timeout to 1s */
        iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
        int i, j;
        u32 ioc_feature_control;
        struct pci_dev *pdev = iommu->root_pdev;

        /* RD890 BIOSes may not have completely reconfigured the iommu */
        if (!is_rd890_iommu(iommu->dev) || !pdev)
                return;

        /*
         * First, we need to ensure that the iommu is enabled. This is
         * controlled by a register in the northbridge
         */

        /* Select Northbridge indirect register 0x75 and enable writing */
        pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
        pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

        /* Enable the iommu */
        if (!(ioc_feature_control & 0x1))
                pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

        /* Restore the iommu BAR */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo);
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
                               iommu->stored_addr_hi);

        /* Restore the l1 indirect regs for each of the 6 l1s */
        for (i = 0; i < 6; i++)
                for (j = 0; j < 0x12; j++)
                        iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

        /* Restore the l2 indirect regs */
        for (i = 0; i < 0x83; i++)
                iommu_write_l2(iommu, i, iommu->stored_l2[i]);

        /* Lock PCI setup registers */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo | 1);
}
static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        switch (amd_iommu_guest_ir) {
        case AMD_IOMMU_GUEST_IR_VAPIC:
                iommu_feature_enable(iommu, CONTROL_GAM_EN);
                /* Fall through */
        case AMD_IOMMU_GUEST_IR_LEGACY_GA:
                iommu_feature_enable(iommu, CONTROL_GA_EN);
                iommu->irte_ops = &irte_128_ops;
                break;
        default:
                iommu->irte_ops = &irte_32_ops;
                break;
        }
#endif
}
static void early_enable_iommu(struct amd_iommu *iommu)
{
        iommu_disable(iommu);
        iommu_init_flags(iommu);
        iommu_set_device_table(iommu);
        iommu_enable_command_buffer(iommu);
        iommu_enable_event_buffer(iommu);
        iommu_set_exclusion_range(iommu);
        iommu_enable_ga(iommu);
        iommu_enable(iommu);
        iommu_flush_all_caches(iommu);
}
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, it tries to
 * copy the old content of the device table entries. If that is not the case,
 * or the copy fails, it just continues as a normal kernel would.
 */
static void early_enable_iommus(void)
{
        struct amd_iommu *iommu;


        if (!copy_device_table()) {
                /*
                 * If we get here because copying the device table from the
                 * old kernel failed while all IOMMUs were enabled, print an
                 * error message and try to free the allocated old_dev_tbl_cpy.
                 */
                if (amd_iommu_pre_enabled)
                        pr_err("Failed to copy DEV table from previous kernel.\n");
                if (old_dev_tbl_cpy != NULL)
                        free_pages((unsigned long)old_dev_tbl_cpy,
                                   get_order(dev_table_size));

                for_each_iommu(iommu) {
                        clear_translation_pre_enabled(iommu);
                        early_enable_iommu(iommu);
                }
        } else {
                pr_info("Copied DEV table from previous kernel.\n");
                free_pages((unsigned long)amd_iommu_dev_table,
                           get_order(dev_table_size));
                amd_iommu_dev_table = old_dev_tbl_cpy;
                for_each_iommu(iommu) {
                        iommu_disable_command_buffer(iommu);
                        iommu_disable_event_buffer(iommu);
                        iommu_enable_command_buffer(iommu);
                        iommu_enable_event_buffer(iommu);
                        iommu_enable_ga(iommu);
                        iommu_set_device_table(iommu);
                        iommu_flush_all_caches(iommu);
                }
        }

#ifdef CONFIG_IRQ_REMAP
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}
static void enable_iommus_v2(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                iommu_enable_ppr_log(iommu);
                iommu_enable_gt(iommu);
        }
}

static void enable_iommus(void)
{
        early_enable_iommus();

        enable_iommus_v2();
}

static void disable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}
/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_apply_resume_quirks(iommu);

        /* re-load the hardware */
        enable_iommus();

        amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
        /* disable IOMMUs to go out of the way for BIOS */
        disable_iommus();

        return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
        .suspend = amd_iommu_suspend,
        .resume = amd_iommu_resume,
};
static void __init free_iommu_resources(void)
{
        kmemleak_free(irq_lookup_table);
        free_pages((unsigned long)irq_lookup_table,
                   get_order(rlookup_table_size));
        irq_lookup_table = NULL;

        kmem_cache_destroy(amd_iommu_irq_cache);
        amd_iommu_irq_cache = NULL;

        free_pages((unsigned long)amd_iommu_rlookup_table,
                   get_order(rlookup_table_size));
        amd_iommu_rlookup_table = NULL;

        free_pages((unsigned long)amd_iommu_alias_table,
                   get_order(alias_table_size));
        amd_iommu_alias_table = NULL;

        free_pages((unsigned long)amd_iommu_dev_table,
                   get_order(dev_table_size));
        amd_iommu_dev_table = NULL;

        free_iommu_all();

#ifdef CONFIG_GART_IOMMU
        /*
         * We failed to initialize the AMD IOMMU - try fallback to GART
         * if possible.
         */
        gart_iommu_init();

#endif
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID         ((0x00 << 8) | PCI_DEVFN(0x14, 0))
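/*
 * Illustrative note: device ids in this driver pack PCI bus/dev/fn as
 * (bus << 8) | devfn. PCI_DEVFN(0x14, 0) is (0x14 << 3) | 0 = 0xa0, so
 * IOAPIC_SB_DEVID works out to devid 0x00a0, i.e. device 00:14.0.
 */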
static bool __init check_ioapic_information(void)
{
        const char *fw_bug = FW_BUG;
        bool ret, has_sb_ioapic;
        int idx;

        has_sb_ioapic = false;
        ret           = false;

        /*
         * If we have map overrides on the kernel command line the
         * messages in this function might not describe firmware bugs
         * anymore - so be careful
         */
        if (cmdline_maps)
                fw_bug = "";

        for (idx = 0; idx < nr_ioapics; idx++) {
                int devid, id = mpc_ioapic_id(idx);

                devid = get_ioapic_devid(id);
                if (devid < 0) {
                        pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
                               fw_bug, id);
                        ret = false;
                } else if (devid == IOAPIC_SB_DEVID) {
                        has_sb_ioapic = true;
                        ret           = true;
                }
        }

        if (!has_sb_ioapic) {
                /*
                 * We expect the SB IOAPIC to be listed in the IVRS
                 * table. The system timer is connected to the SB IOAPIC
                 * and if we don't have it in the list the system will
                 * panic at boot time. This situation usually happens
                 * when the BIOS is buggy and provides us the wrong
                 * device id for the IOAPIC in the system.
                 */
                pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
        }

        if (!ret)
                pr_err("AMD-Vi: Disabling interrupt remapping\n");

        return ret;
}
static void __init free_dma_resources(void)
{
        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));
        amd_iommu_pd_alloc_bitmap = NULL;

        free_unity_maps();
}
/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * four times:
 *
 *      1 pass) Discover the most comprehensive IVHD type to use.
 *
 *      2 pass) Find the highest PCI device id the driver has to handle.
 *              Upon this information the size of the data structures is
 *              determined that needs to be allocated.
 *
 *      3 pass) Initialize the data structures just allocated with the
 *              information in the ACPI table about available AMD IOMMUs
 *              in the system. It also maps the PCI devices in the
 *              system to specific IOMMUs
 *
 *      4 pass) After the basic data structures are allocated and
 *              initialized we update them with information about memory
 *              remapping requirements parsed out of the ACPI table in
 *              the last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
2376 struct acpi_table_header *ivrs_base;
2378 int i, remap_cache_sz, ret = 0;
2380 if (!amd_iommu_detected)
2383 status = acpi_get_table("IVRS", 0, &ivrs_base);
2384 if (status == AE_NOT_FOUND)
2386 else if (ACPI_FAILURE(status)) {
2387 const char *err = acpi_format_exception(status);
2388 pr_err("AMD-Vi: IVRS table error: %s\n", err);
2393 * Validate checksum here so we don't need to do it when
2394 * we actually parse the table
2396 ret = check_ivrs_checksum(ivrs_base);
2400 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2401 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2404 * First parse ACPI tables to find the largest Bus/Dev/Func
2405 * we need to handle. Upon this information the shared data
2406 * structures for the IOMMUs in the system will be allocated
2408 ret = find_last_devid_acpi(ivrs_base);
	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to the Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * Let all alias entries point to themselves.
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * Never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder.
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);
	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * Now that the data structures are allocated and basically
	 * initialized, start the real ACPI table scan.
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							IRQ_TABLE_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);
	ivrs_base = NULL;

	return ret;
}
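
/*
 * Editor's illustration (hypothetical helper, not original code): the
 * device, alias and rlookup tables above are all indexed by the 16-bit
 * PCI device id, bus number in the high byte and devfn in the low byte.
 * Device 00:01.0 (devfn 0x08) therefore lands at index 0x0008 and, after
 * the identity loop in early_amd_iommu_init(), starts out aliased to
 * itself.
 */
static inline u16 __maybe_unused example_devid(u8 bus, u8 devfn)
{
	/* same packing as used by the ivrs_* command line parsers below */
	return ((u16)bus << 8) | devfn;
}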
static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}
static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}
/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state = IOMMU_NOT_FOUND;
			ret = -ENODEV;
		} else {
			init_state = IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD-Vi: AMD IOMMU disabled on kernel command-line\n");
			free_dma_resources();
			free_iommu_resources();
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	return ret;
}
static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}
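
/*
 * Editor's sketch of the state machine driven by the loop above.  In the
 * absence of errors the states advance linearly:
 *
 *	IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED
 *	  -> IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN
 *	  -> IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * A caller that only needs the ACPI tables parsed can stop the machine
 * early, as the interrupt remapping code below does:
 *
 *	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
 */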
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;

	return 0;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}

	return ret;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just checks whether there is an IVRS ACPI table that
 * describes AMD IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/
static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}
static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}
static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}
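
/*
 * Usage sketch added for clarity (not in the original file): because the
 * loop above matches option names at every position of the string,
 * several options can be combined in one parameter, e.g. on the kernel
 * command line:
 *
 *	amd_iommu=fullflush,force_isolation
 *	amd_iommu=off
 */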
static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_ioapic_map_size++;
	early_ioapic_map[i].id = id;
	early_ioapic_map[i].devid = devid;
	early_ioapic_map[i].cmd_line = true;

	return 1;
}
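
/*
 * Worked example (editor's addition, matching the syntax documented in
 * Documentation/admin-guide/kernel-parameters.txt): to map IOAPIC-ID
 * decimal 10 to PCI device 00:14.0, boot with:
 *
 *	ivrs_ioapic[10]=00:14.0
 *
 * which packs into devid = (0x00 << 8) | (0x14 << 3) | 0x0 = 0x00a0.
 */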
static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_hpet_map_size++;
	early_hpet_map[i].id = id;
	early_hpet_map[i].devid = devid;
	early_hpet_map[i].cmd_line = true;

	return 1;
}
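
/*
 * Worked example (editor's addition, per the documented syntax): to map
 * HPET-ID 0 to PCI device 00:14.0, boot with:
 *
 *	ivrs_hpet[0]=00:14.0
 */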
static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("AMD-Vi: Invalid command line: hid or uid\n");
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line = true;

	return 1;
}
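
/*
 * Worked example (editor's addition, per the documented syntax): to map
 * the HID:UID pair AMD0020:0 to PCI device 00:14.5, boot with:
 *
 *	ivrs_acpihid[00:14.5]=AMD0020:0
 */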
2867 __setup("amd_iommu_dump", parse_amd_iommu_dump);
2868 __setup("amd_iommu=", parse_amd_iommu_options);
2869 __setup("amd_iommu_intr=", parse_amd_iommu_intr);
2870 __setup("ivrs_ioapic", parse_ivrs_ioapic);
2871 __setup("ivrs_hpet", parse_ivrs_hpet);
2872 __setup("ivrs_acpihid", parse_ivrs_acpihid);
2874 IOMMU_INIT_FINISH(amd_iommu_detect,
2875 gart_iommu_hole_init,
bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
EXPORT_SYMBOL(get_amd_iommu);
/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/
u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);
u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}
int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);
int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);
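
/*
 * Editor's usage sketch (hypothetical, not part of the original file):
 * reading the 48-bit counter of bank 0/counter 0 through the exported
 * API above.  Function offset 0x00 is assumed to select the counter
 * register itself, as the perf amd_iommu PMU driver does.
 */
static int __maybe_unused example_read_first_counter(u64 *val)
{
	struct amd_iommu *iommu = get_amd_iommu(0);

	if (!iommu)
		return -ENODEV;

	/* bank 0, counter 0, function 0x00 (counter register) */
	return amd_iommu_pc_get_reg(iommu, 0, 0, 0, val);
}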