// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: "	fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/msidef.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_IW                    0x04
#define IVMD_FLAG_IR                    0x02
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT	100000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table which the content of old device table
 * will be copied to. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

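/*
 * Worked example for tbl_size() (illustrative only): with
 * amd_iommu_last_bdf == 0xffff and a 32-byte device table entry,
 * (0xffff + 1) * 32 is 2 MiB; get_order() of that is 9, so the result
 * is 1UL << (PAGE_SHIFT + 9) == 2 MiB. Table sizes are therefore
 * always rounded up to a power-of-two number of pages.
 */
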
int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

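/*
 * Note on the helpers above (derived from the code itself, not from a
 * separate register specification): both register spaces use a classic
 * index/data pair in PCI config space. For L1, the index at 0xf8
 * encodes the register address and the L1 block number (l1 << 16),
 * with bit 31 acting as a write-enable; the data port is 0xfc. For
 * L2, the index at 0xf0 carries the address (bit 8 is the
 * write-enable) and the data port is 0xf4.
 */
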
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!iommu_feature(iommu, FEATURE_SNP))
		return;

	/* Note:
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/* Note:
	 * Default to 4 Kbytes, which can be specified by setting base
	 * address equal to the limit address.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}

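/*
 * Size encoding used above (illustrative): the device table base
 * register carries the table size in its low bits as the number of
 * 4K pages minus one. For a 2 MiB table, (dev_table_size >> 12) - 1
 * is 511 (0x1ff), telling the hardware the table spans 512 pages.
 */
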
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}

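/*
 * Entry length classes implied by the encoding above (illustrative):
 * types 0x00-0x3f are 4-byte entries (4 << 0) and types 0x40-0x7f are
 * 8-byte entries (4 << 1). For example IVHD_DEV_SELECT (0x02) is a
 * 4-byte entry while IVHD_DEV_ALIAS (0x42) is an 8-byte entry.
 * IVHD_DEV_ACPI_HID entries are variable length: a 22-byte fixed part
 * plus the UID bytes.
 */
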
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

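/*
 * ACPI checksum rule applied above: the bytes of the table, including
 * the checksum byte itself, must sum to zero modulo 256. A corrupt or
 * truncated IVRS table therefore fails here before any parsing starts.
 */
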
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

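/*
 * Design note on the command buffer (a summary of the code above):
 * the buffer is a ring. The driver queues commands at the tail and
 * the IOMMU fetches from the head, with both positions mirrored in
 * MMIO registers. Resetting head and tail to zero, as
 * amd_iommu_reset_cmd_buffer() does, marks the ring empty again.
 */
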
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its PPR requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/* Note: We have already checked GASup from IVRS table.
	 *       Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

	return ret;
}

static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);

	return iommu->cmd_sem ? 0 : -ENOMEM;
}

static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
	if (iommu->cmd_sem)
		free_page((unsigned long)iommu->cmd_sem);
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}

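/*
 * Worked example for the bit helpers above (illustrative): a device
 * table entry is a small array of u64 words, so a flat bit index
 * selects word i = bit >> 6 and bit _bit = bit & 0x3f within it. For
 * instance, bit index 0x61 lands in data[1], bit 0x21.
 */
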
static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
				iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
				iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask); we must remove the memory
	 * encryption mask to obtain the true physical address in the kdump
	 * kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
				get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

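/*
 * Reading the workaround above: the two SYSMGT bits form a 2-bit
 * field (SYSMGT2:SYSMGT1). Only the value 01b - SYSMGT1 set, SYSMGT2
 * clear - triggers the erratum handling, in which case the IW (write
 * permission) bit is forced on for the device.
 */
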
/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line	= cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	amd_iommu_apply_ivrs_quirks();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >>  8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_cwwb_sem(iommu);
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}

/*
 * This function glues the initialization steps for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);
	iommu->cmd_sem_val = 0;

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling it.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * XT and GAM also require GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		/*
		 * Note: Since iommu_update_intcapxt() leverages
		 * the IOMMU MMIO access to MSI capability block registers
		 * for MSI address lo/hi/data, we need to check both
		 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
		 */
		if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
		    (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through the IVHD blocks and returns the highest
 * supported IVHD type found for the IOMMU described by the first block.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
					(base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write);

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	struct pci_dev *pdev = iommu->dev;
	u64 val = 0xabcd, val2 = 0, save_reg = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* save the value to restore, if writable */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
		goto pc_false;

	/* Check if the performance counters can be written to */
	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
	    (val != val2))
		goto pc_false;

	/* restore */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
		goto pc_false;

	pci_info(pdev, "IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);

	return;

pc_false:
	pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
	amd_iommu_pc_present = false;
	return;
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};

static int __init iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid  = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				&iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				&iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
	iommu_device_register(&iommu->iommu);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		struct pci_dev *pdev = iommu->dev;
		int i;

		pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pci_info(pdev, "Extended features (%#llx):",
				 iommu->features);
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("Virtual APIC enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("X2APIC enabled\n");
	}
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;

		/* Need to setup range after PCI init */
		iommu_set_cwwb_range(iommu);
	}

	/*
	 * Order is important here to make sure any unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the amd_iommu_init_api() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (!ret)
		print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

#define XT_INT_DEST_MODE(x)	(((x) & 0x1ULL) << 2)
#define XT_INT_DEST_LO(x)	(((x) & 0xFFFFFFULL) << 8)
#define XT_INT_VEC(x)		(((x) & 0xFFULL) << 32)
#define XT_INT_DEST_HI(x)	((((x) >> 24) & 0xFFULL) << 56)

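/*
 * Layout composed by the macros above (derived from the shifts, not a
 * normative register description): bit 2 holds the destination mode,
 * bits 31:8 the low 24 destination ID bits, bits 39:32 the vector and
 * bits 63:56 the high destination ID bits - matching what
 * iommu_update_intcapxt() below writes into the IntCapXT registers.
 */
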
/*
 * Setup the IntCapXT registers with interrupt routing information
 * based on the PCI MSI capability block registers, accessed via
 * MMIO MSI address low/hi and MSI data registers.
 */
static void iommu_update_intcapxt(struct amd_iommu *iommu)
{
	u64 val;
	u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
	u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
	u32 data    = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
	bool dm     = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
	u32 dest    = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);

	if (x2apic_enabled())
		dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);

	val = XT_INT_VEC(data & 0xFF) |
	      XT_INT_DEST_MODE(dm) |
	      XT_INT_DEST_LO(dest) |
	      XT_INT_DEST_HI(dest);

	/*
	 * Current IOMMU implementation uses the same IRQ for all
	 * 3 IOMMU interrupts.
	 */
	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
}

static void _irq_notifier_notify(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		if (iommu->dev->irq == notify->irq) {
			iommu_update_intcapxt(iommu);
			break;
		}
	}
}

static void _irq_notifier_release(struct kref *ref)
{
}

static int iommu_init_intcapxt(struct amd_iommu *iommu)
{
	int ret;
	struct irq_affinity_notify *notify = &iommu->intcapxt_notify;

	/*
	 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
	 * which can be inferred from amd_iommu_xt_mode.
	 */
	if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
		return 0;

	/*
	 * Also, we need to setup notifier to update the IntCapXT registers
	 * whenever the irq affinity is changed from user-space.
	 */
	notify->irq = iommu->dev->irq;
	notify->notify = _irq_notifier_notify;
	notify->release = _irq_notifier_release;
	ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
	if (ret) {
		pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
		       iommu->devid, iommu->dev->irq);
		return ret;
	}

	iommu_update_intcapxt(iommu);
	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
	return ret;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	ret = iommu_init_intcapxt(iommu);
	if (ret)
		return ret;

	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);

	iommu_ga_log_enable(iommu);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	/*
	 * Treat per-device exclusion ranges as r/w unity-mapped regions
	 * since some buggy BIOSes overwrite the exclusion range
	 * (exclusion_start and exclusion_length members) when multiple
	 * exclusion ranges (IVMD entries) are defined in the ACPI table.
	 */
	if (m->flags & IVMD_FLAG_EXCL_RANGE)
		e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

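/*
 * Protection encoding used above (illustrative): IVMD flag bit 0 is
 * the unity-map marker, bit 1 (IVMD_FLAG_IR) permits reads and bit 2
 * (IVMD_FLAG_IW) permits writes. Shifting the flags right by one
 * drops the marker, e.g. flags == 0x07 (unity + IR + IW) yields
 * prot == 0x03 (read + write).
 */
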
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		fallthrough;
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}

static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable_xt(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}

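/*
 * Ordering note for early_enable_iommu() above: the IOMMU is disabled
 * first, the device table, command buffer, event buffer and exclusion
 * range are programmed while it is off, and only then is translation
 * switched on and every cache flushed - so the hardware never observes
 * a half-initialized configuration.
 */
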
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, it tries
 * to copy the old content of the device table entries. If this is not
 * the case, or the copy failed, it just continues as a normal kernel
 * would.
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;


	if (!copy_device_table()) {
		/*
		 * If we got here because copying the device table from the
		 * old kernel failed while all IOMMUs were enabled, print an
		 * error message and try to free the allocated
		 * old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_iommu_resources(void)
{
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID         ((0x00 << 8) | PCI_DEVFN(0x14, 0))
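/*
 * Worked example (illustrative): PCI_DEVFN(0x14, 0) expands to
 * ((0x14 & 0x1f) << 3) | (0 & 0x07) = 0xa0, so with bus 0x00 the
 * IOAPIC_SB_DEVID above evaluates to the 16-bit device id 0x00a0,
 * i.e. PCI device 00:14.0.
 */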
static bool __init check_ioapic_information(void)
{
        const char *fw_bug = FW_BUG;
        bool ret, has_sb_ioapic;
        int idx;

        has_sb_ioapic = false;
        ret           = false;

        /*
         * If we have map overrides on the kernel command line the
         * messages in this function might not describe firmware bugs
         * anymore - so be careful
         */
        if (cmdline_maps)
                fw_bug = "";

        for (idx = 0; idx < nr_ioapics; idx++) {
                int devid, id = mpc_ioapic_id(idx);

                devid = get_ioapic_devid(id);
                if (devid < 0) {
                        pr_err("%s: IOAPIC[%d] not in IVRS table\n",
                               fw_bug, id);
                        ret = false;
                } else if (devid == IOAPIC_SB_DEVID) {
                        has_sb_ioapic = true;
                        ret           = true;
                }
        }

        if (!has_sb_ioapic) {
                /*
                 * We expect the SB IOAPIC to be listed in the IVRS
                 * table. The system timer is connected to the SB IOAPIC
                 * and if we don't have it in the list the system will
                 * panic at boot time. This situation usually happens
                 * when the BIOS is buggy and provides us the wrong
                 * device id for the IOAPIC in the system.
                 */
                pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
        }
        if (!ret)
                pr_err("Disabling interrupt remapping\n");
        return ret;
}
static void __init free_dma_resources(void)
{
        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));
        amd_iommu_pd_alloc_bitmap = NULL;

        free_unity_maps();
}
/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for the AMD IOMMU (IVRS)
 * four times:
 *
 *	1 pass) Discover the most comprehensive IVHD type to use.
 *
 *	2 pass) Find the highest PCI device id the driver has to handle.
 *		Based on this information the sizes of the data structures
 *		that need to be allocated are determined.
 *
 *	3 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs.
 *
 *	4 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
        struct acpi_table_header *ivrs_base;
        acpi_status status;
        int i, remap_cache_sz, ret = 0;
        u32 pci_id;

        if (!amd_iommu_detected)
                return -ENODEV;

        status = acpi_get_table("IVRS", 0, &ivrs_base);
        if (status == AE_NOT_FOUND)
                return -ENODEV;
        else if (ACPI_FAILURE(status)) {
                const char *err = acpi_format_exception(status);

                pr_err("IVRS table error: %s\n", err);
                return -EINVAL;
        }

        /*
         * Validate checksum here so we don't need to do it when
         * we actually parse the table
         */
        ret = check_ivrs_checksum(ivrs_base);
        if (ret)
                goto out;

        amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
        DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

        /*
         * First parse ACPI tables to find the largest Bus/Dev/Func we need
         * to handle. Based on this information the shared data structures
         * for the IOMMUs in the system will be allocated.
         */
        ret = find_last_devid_acpi(ivrs_base);
        if (ret)
                goto out;

        dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

        /* Device table - directly used by all IOMMUs */
        ret = -ENOMEM;
        amd_iommu_dev_table = (void *)__get_free_pages(
                                      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
                                      get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;

        /*
         * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
         * IOMMU sees for that device
         */
        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
                                        get_order(alias_table_size));
        if (amd_iommu_alias_table == NULL)
                goto out;

        /* IOMMU rlookup table - find the IOMMU for a specific device */
        amd_iommu_rlookup_table = (void *)__get_free_pages(
                                          GFP_KERNEL | __GFP_ZERO,
                                          get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto out;

        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
                                            GFP_KERNEL | __GFP_ZERO,
                                            get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto out;

        /*
         * let all alias entries point to themselves
         */
        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;

        /*
         * never allocate domain 0 because it's used as the non-allocated and
         * error value placeholder
         */
        __set_bit(0, amd_iommu_pd_alloc_bitmap);

        /*
         * now the data structures are allocated and basically initialized,
         * start the real acpi table scan
         */
        ret = init_iommu_all(ivrs_base);
        if (ret)
                goto out;

        /* Disable IOMMU if there's Stoney Ridge graphics */
        for (i = 0; i < 32; i++) {
                pci_id = read_pci_config(0, i, 0, 0);
                if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
                        pr_info("Disable IOMMU on Stoney Ridge\n");
                        amd_iommu_disabled = true;
                        break;
                }
        }

        /* Disable any previously enabled IOMMUs */
        if (!is_kdump_kernel() || amd_iommu_disabled)
                disable_iommus();

        if (amd_iommu_irq_remap)
                amd_iommu_irq_remap = check_ioapic_information();

        if (amd_iommu_irq_remap) {
                /*
                 * Interrupt remapping enabled, create kmem_cache for the
                 * remapping tables.
                 */
                ret = -ENOMEM;
                if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
                        remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
                else
                        remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
                amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
                                                        remap_cache_sz,
                                                        IRQ_TABLE_ALIGNMENT,
                                                        0, NULL);
                if (!amd_iommu_irq_cache)
                        goto out;

                irq_lookup_table = (void *)__get_free_pages(
                                GFP_KERNEL | __GFP_ZERO,
                                get_order(rlookup_table_size));
                kmemleak_alloc(irq_lookup_table, rlookup_table_size,
                               1, GFP_KERNEL);
                if (!irq_lookup_table)
                        goto out;
        }

        ret = init_memory_definitions(ivrs_base);
        if (ret)
                goto out;

        /* init the device table */
        init_device_table();

out:
        /* Don't leak any ACPI memory */
        acpi_put_table(ivrs_base);
        ivrs_base = NULL;

        return ret;
}
static int amd_iommu_enable_interrupts(void)
{
        struct amd_iommu *iommu;
        int ret = 0;

        for_each_iommu(iommu) {
                ret = iommu_init_msi(iommu);
                if (ret)
                        goto out;
        }

out:
        return ret;
}
static bool detect_ivrs(void)
{
        struct acpi_table_header *ivrs_base;
        acpi_status status;

        status = acpi_get_table("IVRS", 0, &ivrs_base);
        if (status == AE_NOT_FOUND)
                return false;
        else if (ACPI_FAILURE(status)) {
                const char *err = acpi_format_exception(status);

                pr_err("IVRS table error: %s\n", err);
                return false;
        }

        acpi_put_table(ivrs_base);

        /* Make sure ACS will be enabled during PCI probe */
        pci_request_acs();

        return true;
}
/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
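/*
 * Sketch of the state progression driven by state_next() below (derived
 * from the switch statement, not a separate specification):
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and IOMMU_CMDLINE_DISABLED are
 * terminal error states that stop any further transitions.
 */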
static int __init state_next(void)
{
        int ret = 0;

        switch (init_state) {
        case IOMMU_START_STATE:
                if (!detect_ivrs()) {
                        init_state = IOMMU_NOT_FOUND;
                        ret        = -ENODEV;
                } else {
                        init_state = IOMMU_IVRS_DETECTED;
                }
                break;
        case IOMMU_IVRS_DETECTED:
                ret = early_amd_iommu_init();
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
                if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
                        pr_info("AMD IOMMU disabled\n");
                        init_state = IOMMU_CMDLINE_DISABLED;
                        ret        = -EINVAL;
                }
                break;
        case IOMMU_ACPI_FINISHED:
                early_enable_iommus();
                x86_platform.iommu_shutdown = disable_iommus;
                init_state = IOMMU_ENABLED;
                break;
        case IOMMU_ENABLED:
                register_syscore_ops(&amd_iommu_syscore_ops);
                ret = amd_iommu_init_pci();
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
                enable_iommus_v2();
                break;
        case IOMMU_PCI_INIT:
                ret = amd_iommu_enable_interrupts();
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
                break;
        case IOMMU_INTERRUPTS_EN:
                ret = amd_iommu_init_dma_ops();
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
                break;
        case IOMMU_DMA_OPS:
                init_state = IOMMU_INITIALIZED;
                break;
        case IOMMU_INITIALIZED:
                /* Nothing to do */
                break;
        case IOMMU_NOT_FOUND:
        case IOMMU_INIT_ERROR:
        case IOMMU_CMDLINE_DISABLED:
                /* Error states => do nothing */
                ret = -EINVAL;
                break;
        default:
                /* Unknown state */
                BUG();
        }

        if (ret) {
                free_dma_resources();
                if (!irq_remapping_enabled) {
                        disable_iommus();
                        free_iommu_resources();
                } else {
                        struct amd_iommu *iommu;

                        uninit_device_table_dma();
                        for_each_iommu(iommu)
                                iommu_flush_all_caches(iommu);
                }
        }
        return ret;
}
static int __init iommu_go_to_state(enum iommu_init_state state)
{
        int ret = 0;

        while (init_state != state) {
                if (init_state == IOMMU_NOT_FOUND ||
                    init_state == IOMMU_INIT_ERROR ||
                    init_state == IOMMU_CMDLINE_DISABLED)
                        break;
                ret = state_next();
        }

        return ret;
}
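/*
 * Usage note: callers advance the machine to a specific milestone, e.g.
 * amd_iommu_detect() goes to IOMMU_IVRS_DETECTED while amd_iommu_init()
 * drives it all the way to IOMMU_INITIALIZED. Repeated calls are harmless
 * because the loop exits as soon as the requested (or an error) state is
 * reached.
 */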
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
        int ret;

        amd_iommu_irq_remap = true;

        ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
        if (ret)
                return ret;
        return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
        int ret;

        ret = iommu_go_to_state(IOMMU_ENABLED);
        if (ret)
                return ret;

        irq_remapping_enabled = 1;
        return amd_iommu_xt_mode;
}

void amd_iommu_disable(void)
{
        amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
        amd_iommu_resume();

        return 0;
}

int __init amd_iommu_enable_faulting(void)
{
        /* We enable MSI later when PCI is initialized */
        return 0;
}
#endif
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
        struct amd_iommu *iommu;
        int ret;

        ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
        if (ret && list_empty(&amd_iommu_list)) {
                /*
                 * We failed to initialize the AMD IOMMU - try fallback
                 * to GART if possible.
                 */
                gart_iommu_init();
        }
#endif

        for_each_iommu(iommu)
                amd_iommu_debugfs_setup(iommu);

        return ret;
}
static bool amd_iommu_sme_check(void)
{
        if (!sme_active() || (boot_cpu_data.x86 != 0x17))
                return true;

        /* For Fam17h, a specific level of support is required */
        if (boot_cpu_data.microcode >= 0x08001205)
                return true;

        if ((boot_cpu_data.microcode >= 0x08001126) &&
            (boot_cpu_data.microcode <= 0x080011ff))
                return true;

        pr_notice("IOMMU not currently supported when SME is active\n");

        return false;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
        int ret;

        if (no_iommu || (iommu_detected && !gart_iommu_aperture))
                return -ENODEV;

        if (!amd_iommu_sme_check())
                return -ENODEV;

        ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
        if (ret)
                return ret;

        amd_iommu_detected = true;
        iommu_detected = 1;
        x86_init.iommu.iommu_init = amd_iommu_init;

        return 1;
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/
static int __init parse_amd_iommu_dump(char *str)
{
        amd_iommu_dump = true;

        return 1;
}
static int __init parse_amd_iommu_intr(char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "legacy", 6) == 0) {
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
                        break;
                }
                if (strncmp(str, "vapic", 5) == 0) {
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
                        break;
                }
        }
        return 1;
}
static int __init parse_amd_iommu_options(char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "fullflush", 9) == 0)
                        amd_iommu_unmap_flush = true;
                if (strncmp(str, "off", 3) == 0)
                        amd_iommu_disabled = true;
                if (strncmp(str, "force_isolation", 15) == 0)
                        amd_iommu_force_isolation = true;
        }

        return 1;
}
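/*
 * Example (matching the strncmp checks above): booting with
 * "amd_iommu=fullflush" sets amd_iommu_unmap_flush. Because the loop
 * scans every character position, options can also be combined, e.g.
 * "amd_iommu=fullflush,force_isolation".
 */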
static int __init parse_ivrs_ioapic(char *str)
{
        unsigned int bus, dev, fn;
        int ret, id, i;
        u16 devid;

        ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
        if (ret != 4) {
                pr_err("Invalid command line: ivrs_ioapic%s\n", str);
                return 1;
        }

        if (early_ioapic_map_size == EARLY_MAP_SIZE) {
                pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
                       str);
                return 1;
        }

        devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

        cmdline_maps = true;
        i = early_ioapic_map_size++;
        early_ioapic_map[i].id = id;
        early_ioapic_map[i].devid = devid;
        early_ioapic_map[i].cmd_line = true;

        return 1;
}
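/*
 * Example (format given by the sscanf above): "ivrs_ioapic[10]=00:14.0"
 * overrides IOAPIC id 10 to devid (0x00 << 8) | (0x14 << 3) | 0 = 0x00a0,
 * i.e. PCI device 00:14.0.
 */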
static int __init parse_ivrs_hpet(char *str)
{
        unsigned int bus, dev, fn;
        int ret, id, i;
        u16 devid;

        ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
        if (ret != 4) {
                pr_err("Invalid command line: ivrs_hpet%s\n", str);
                return 1;
        }

        if (early_hpet_map_size == EARLY_MAP_SIZE) {
                pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
                       str);
                return 1;
        }

        devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

        cmdline_maps = true;
        i = early_hpet_map_size++;
        early_hpet_map[i].id = id;
        early_hpet_map[i].devid = devid;
        early_hpet_map[i].cmd_line = true;

        return 1;
}
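/*
 * Example (same format as ivrs_ioapic): "ivrs_hpet[0]=00:14.0" maps
 * HPET id 0 to devid 0x00a0.
 */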
static int __init parse_ivrs_acpihid(char *str)
{
        u32 bus, dev, fn;
        char *hid, *uid, *p;
        char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
        int ret, i;

        ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
        if (ret != 4) {
                pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
                return 1;
        }

        p = acpiid;
        hid = strsep(&p, ":");
        uid = p;

        if (!hid || !(*hid) || !uid) {
                pr_err("Invalid command line: hid or uid\n");
                return 1;
        }

        i = early_acpihid_map_size++;
        memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
        memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
        early_acpihid_map[i].devid =
                ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
        early_acpihid_map[i].cmd_line = true;

        return 1;
}
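/*
 * Example (format given by the sscanf above): "ivrs_acpihid[00:14.5]=AMD0020:0"
 * maps the ACPI HID/UID pair AMD0020/0 to devid (0x14 << 3) | 5 = 0x00a5.
 */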
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_intr=", parse_amd_iommu_intr);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
__setup("ivrs_acpihid", parse_ivrs_acpihid);
IOMMU_INIT_FINISH(amd_iommu_detect,
                  gart_iommu_hole_init,
                  NULL,
                  NULL);
bool amd_iommu_v2_supported(void)
{
        return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
struct amd_iommu *get_amd_iommu(unsigned int idx)
{
        unsigned int i = 0;
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                if (i++ == idx)
                        return iommu;
        return NULL;
}
EXPORT_SYMBOL(get_amd_iommu);
/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/
u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
        struct amd_iommu *iommu = get_amd_iommu(idx);

        if (iommu)
                return iommu->max_banks;

        return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
bool amd_iommu_pc_supported(void)
{
        return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);
u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
        struct amd_iommu *iommu = get_amd_iommu(idx);

        if (iommu)
                return iommu->max_counters;

        return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
                                u8 fxn, u64 *value, bool is_write)
{
        u32 offset;
        u32 max_offset_lim;

        /* Make sure the IOMMU PC resource is available */
        if (!amd_iommu_pc_present)
                return -ENODEV;

        /* Check for valid iommu and pc register indexing */
        if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
                return -ENODEV;

        offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

        /* Limit the offset to the hw defined mmio region aperture */
        max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
                               (iommu->max_counters << 8) | 0x28);
        if ((offset < MMIO_CNTR_REG_OFFSET) ||
            (offset > max_offset_lim))
                return -EINVAL;

        if (is_write) {
                u64 val = *value & GENMASK_ULL(47, 0);

                writel((u32)val, iommu->mmio_base + offset);
                writel((val >> 32), iommu->mmio_base + offset + 4);
        } else {
                *value = readl(iommu->mmio_base + offset + 4);
                *value <<= 32;
                *value |= readl(iommu->mmio_base + offset);
                *value &= GENMASK_ULL(47, 0);
        }

        return 0;
}
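/*
 * Worked example of the register addressing above (illustrative): bank 0,
 * counter 0, fxn 0x00 yields offset (0x40 << 12) = 0x40000, which is
 * exactly MMIO_CNTR_REG_OFFSET; bank 1, counter 2, fxn 0x08 yields
 * ((0x41 << 12) | (2 << 8) | 0x08) = 0x41208.
 */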
int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
        if (!iommu)
                return -EINVAL;

        return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
        if (!iommu)
                return -EINVAL;

        return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);
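/*
 * Minimal usage sketch for the exported counter API (illustrative only,
 * not code from this driver; the in-tree consumer is the perf AMD IOMMU
 * PMU):
 *
 *	u64 val = 0;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_set_reg(iommu, 0, 0, 0, &val))
 *		amd_iommu_pc_get_reg(iommu, 0, 0, 0, &val);
 */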