// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt) "AMD-Vi: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/cc_platform.h>
#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/x86_init.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>
#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"
/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
#define ACPI_IVMD_TYPE_ALL 0x20
#define ACPI_IVMD_TYPE 0x21
#define ACPI_IVMD_TYPE_RANGE 0x22
#define IVHD_DEV_ALL 0x01
#define IVHD_DEV_SELECT 0x02
#define IVHD_DEV_SELECT_RANGE_START 0x03
#define IVHD_DEV_RANGE_END 0x04
#define IVHD_DEV_ALIAS 0x42
#define IVHD_DEV_ALIAS_RANGE 0x43
#define IVHD_DEV_EXT_SELECT 0x46
#define IVHD_DEV_EXT_SELECT_RANGE 0x47
#define IVHD_DEV_SPECIAL 0x48
#define IVHD_DEV_ACPI_HID 0xf0

#define UID_NOT_PRESENT 0
#define UID_IS_INTEGER 1
#define UID_IS_CHARACTER 2

#define IVHD_SPECIAL_IOAPIC 1
#define IVHD_SPECIAL_HPET 2
#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
#define IVHD_FLAG_PASSPW_EN_MASK 0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
#define IVHD_FLAG_ISOC_EN_MASK 0x08

#define IVMD_FLAG_EXCL_RANGE 0x08
#define IVMD_FLAG_IW 0x04
#define IVMD_FLAG_IR 0x02
#define IVMD_FLAG_UNITY_MAP 0x01

#define ACPI_DEVFLAG_INITPASS 0x01
#define ACPI_DEVFLAG_EXTINT 0x02
#define ACPI_DEVFLAG_NMI 0x04
#define ACPI_DEVFLAG_SYSMGT1 0x10
#define ACPI_DEVFLAG_SYSMGT2 0x20
#define ACPI_DEVFLAG_LINT0 0x40
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000
#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
					     | ((dev & 0x1f) << 3) | (fn & 0x7))
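
/*
 * Illustrative example (hypothetical values): packing segment 0x0000,
 * bus 0x40, device 0x1f, function 0x7 gives
 *   IVRS_GET_SBDF_ID(0x0000, 0x40, 0x1f, 0x7)
 *     == (0x0000 << 16) | (0x40 << 8) | (0x1f << 3) | 0x7 == 0x000040ff.
 */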
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 efr_reg2;
} __attribute__((packed));
/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	struct_group(ext_hid,
} __attribute__((packed));
/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
} __attribute__((packed));
bool amd_iommu_irq_remap __read_mostly;

enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
/* Guest page table level */
int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool amd_iommu_disabled __initdata;
static bool amd_iommu_force_enable __initdata;
static bool amd_iommu_irtcachedis;
static int amd_iommu_target_ivhd_type;

/* Global EFR and EFR2 registers */
u64 amd_iommu_efr;
u64 amd_iommu_efr2;

/* Is SNP enabled on the system? */
bool amd_iommu_snp_en;
EXPORT_SYMBOL(amd_iommu_snp_en);
LIST_HEAD(amd_iommu_pci_seg_list);	/* list of all PCI segments */
LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

static bool amd_iommu_pc_present __read_mostly;
bool amdr_ivrs_remap_support __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;
enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE 4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);

static bool amd_iommu_pre_enabled = true;

static u32 amd_iommu_ivinfo __initdata;
bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1 << CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
static inline unsigned long tbl_size(int entry_size, int last_bdf)
{
	unsigned shift = PAGE_SHIFT +
		get_order((last_bdf + 1) * entry_size);

	return 1UL << shift;
}
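
/*
 * Illustrative example (assumes 4K pages and the 32-byte device table
 * entries used below): for a full segment with last_bdf == 0xffff,
 * (0xffff + 1) * 32 is 2 MiB, get_order() of that is 9, so tbl_size()
 * returns 1UL << (12 + 9) == 2 MiB - the allocation rounded up to a
 * whole power-of-two number of pages.
 */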
int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/*
 * Iterate through all the IOMMUs to get common EFR
 * masks among all IOMMUs and warn if an inconsistency is found.
 */
static __init void get_global_efr(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		u64 tmp = iommu->features;
		u64 tmp2 = iommu->features2;

		if (list_is_first(&iommu->list, &amd_iommu_list)) {
			amd_iommu_efr = tmp;
			amd_iommu_efr2 = tmp2;
			continue;
		}

		if (amd_iommu_efr == tmp &&
		    amd_iommu_efr2 == tmp2)
			continue;

		pr_err(FW_BUG
		       "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
		       tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
		       iommu->index, iommu->pci_seg->id,
		       PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
		       PCI_FUNC(iommu->devid));

		/* Keep only the bits supported by every IOMMU */
		amd_iommu_efr &= tmp;
		amd_iommu_efr2 &= tmp2;
	}

	pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
}
/*
 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
 * Default to IVHD EFR since it is available sooner
 * (i.e. before PCI init).
 */
static void __init early_iommu_features_init(struct amd_iommu *iommu,
					     struct ivhd_header *h)
{
	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
		iommu->features = h->efr_reg;
		iommu->features2 = h->efr_reg2;
	}
	if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
		amdr_ivrs_remap_support = true;
}
/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
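
/*
 * Descriptive sketch of the indirect access protocol above: the index
 * register at PCI config offset 0xf8 (L1) or 0xf0 (L2) selects a register
 * in the indexed space, with a write-enable bit (bit 31 for L1, bit 8 for
 * L2); the data register at 0xfc/0xf4 then carries the value. A
 * read-modify-write of one L2 register could look like:
 *
 *	u32 val = iommu_read_l2(iommu, 0x47);	// L2_DEBUG_3, used below
 *	iommu_write_l2(iommu, 0x47, val | BIT(0));
 */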
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}
static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!check_feature(FEATURE_SNP))
		return;

	/*
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/*
	 * Default to 4 Kbytes, which can be specified by setting the base
	 * address equal to the limit address.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}
/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;
	u32 dev_table_size = iommu->pci_seg->dev_table_size;
	void *dev_table = (void *)get_dev_table(iommu);

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(dev_table);
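	/*
	 * Illustrative note: the low bits of the register encode the table
	 * size in 4K pages minus one, so e.g. a 2 MiB device table yields a
	 * size field of (2 MiB >> 12) - 1 == 0x1ff.
	 */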
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU PPR logging */
	iommu_feature_disable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_disable(iommu, CONTROL_PPRINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);

	/* Clear IRTE cache disabling bit */
	iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
}
/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
		       address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}
static inline u32 get_ivhd_header_size(struct ivhd_header *h)
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x40) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}
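
/*
 * Illustrative example of ivhd_entry_length() for type < 0x40: the top two
 * bits of the type byte select the size, so IVHD_DEV_SELECT (0x02) gives
 * 0x04 << 0 == 4 bytes, while IVHD_DEV_ALIAS (0x42) gives 0x04 << 1 == 8.
 */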
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether the ACPI table defines a higher device id.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;
	int last_devid = -EINVAL;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			return 0xffff;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			if (dev->devid > last_devid)
				last_devid = dev->devid;
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	return last_devid;
}
static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}
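
/*
 * Note on the convention used above: per ACPI, every byte of a table,
 * including the checksum byte itself, must sum to zero modulo 256.
 */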
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	int last_devid, last_bdf = 0;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->pci_seg == pci_seg &&
		    h->type == amd_iommu_target_ivhd_type) {
			last_devid = find_last_devid_from_ivhd(h);

			if (last_devid < 0)
				return -EINVAL;
			if (last_devid > last_bdf)
				last_bdf = last_devid;
		}
		p += h->length;
	}

	return last_bdf;
}
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the per PCI segment device/alias/rlookup tables
 * and also basically initialize the hardware.
 *
 ****************************************************************************/
/* Allocate per PCI segment device table */
static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
	pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
						      get_order(pci_seg->dev_table_size));
	if (!pci_seg->dev_table)
		return -ENOMEM;

	return 0;
}

static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
	free_pages((unsigned long)pci_seg->dev_table,
		   get_order(pci_seg->dev_table_size));
	pci_seg->dev_table = NULL;
}

/* Allocate per PCI segment IOMMU rlookup table. */
static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
	pci_seg->rlookup_table = (void *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO,
						get_order(pci_seg->rlookup_table_size));
	if (pci_seg->rlookup_table == NULL)
		return -ENOMEM;

	return 0;
}

static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
	free_pages((unsigned long)pci_seg->rlookup_table,
		   get_order(pci_seg->rlookup_table_size));
	pci_seg->rlookup_table = NULL;
}
static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
	pci_seg->irq_lookup_table = (void *)__get_free_pages(
					     GFP_KERNEL | __GFP_ZERO,
					     get_order(pci_seg->rlookup_table_size));
	kmemleak_alloc(pci_seg->irq_lookup_table,
		       pci_seg->rlookup_table_size, 1, GFP_KERNEL);
	if (pci_seg->irq_lookup_table == NULL)
		return -ENOMEM;

	return 0;
}

static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
	kmemleak_free(pci_seg->irq_lookup_table);
	free_pages((unsigned long)pci_seg->irq_lookup_table,
		   get_order(pci_seg->rlookup_table_size));
	pci_seg->irq_lookup_table = NULL;
}
static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
	int i;

	pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
							get_order(pci_seg->alias_table_size));
	if (!pci_seg->alias_table)
		return -ENOMEM;

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= pci_seg->last_bdf; ++i)
		pci_seg->alias_table[i] = i;

	return 0;
}

static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
	free_pages((unsigned long)pci_seg->alias_table,
		   get_order(pci_seg->alias_table_size));
	pci_seg->alias_table = NULL;
}
/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}
/*
 * Interrupt handler has processed all pending events and adjusted head
 * and tail pointers. Reset the overflow mask and restart logging again.
 */
static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
				  u8 cntrl_intr, u8 cntrl_log,
				  u32 status_run_mask, u32 status_overflow_mask)
{
	u32 status;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	if (status & status_run_mask)
		return;

	pr_info_ratelimited("IOMMU %s log restarting\n", evt_type);

	iommu_feature_disable(iommu, cntrl_log);
	iommu_feature_disable(iommu, cntrl_intr);

	writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);

	iommu_feature_enable(iommu, cntrl_intr);
	iommu_feature_enable(iommu, cntrl_log);
}
/*
 * This function restarts event logging in case the IOMMU experienced
 * an event log buffer overflow.
 */
void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
{
	amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN,
			      CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK,
			      MMIO_STATUS_EVT_OVERFLOW_MASK);
}

/*
 * This function restarts GA logging in case the IOMMU experienced
 * a GA log overflow.
 */
void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
{
	amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN,
			      CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK,
			      MMIO_STATUS_GALOG_OVERFLOW_MASK);
}

/*
 * This function restarts PPR logging in case the IOMMU experienced
 * a PPR log overflow.
 */
void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
{
	amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
			      CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
			      MMIO_STATUS_PPR_OVERFLOW_MASK);
}
/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}
static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (buf &&
	    check_feature(FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      EVT_BUFFER_SIZE);

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
/* allocates the memory where the IOMMU will place the PPR log entries */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      PPR_LOG_SIZE);

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	iommu_feature_enable(iommu, CONTROL_PPR_EN);

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}
#ifdef CONFIG_IRQ_REMAP
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
	u32 status, i;
	u64 entry;

	if (!iommu->ga_log)
		return -EINVAL;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
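	/*
	 * Descriptive note on the computation below: the tail pointer is a
	 * physical address truncated to the 52-bit address field
	 * (BIT_ULL(52) - 1) and aligned down to 8 bytes (~7ULL), since the
	 * register holds an 8-byte-aligned address.
	 */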
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
		udelay(10);
	}

	if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
		return -EINVAL;

	return 0;
}
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);

	return iommu->cmd_sem ? 0 : -ENOMEM;
}

static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
	if (iommu->cmd_sem)
		free_page((unsigned long)iommu->cmd_sem);
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!check_feature(FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}
/* sets a specific bit in the device table entry. */
static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
				u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	dev_table[devid].data[i] |= (1UL << _bit);
}
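
/*
 * Illustrative example: a device table entry is an array of 64-bit words,
 * so a flat bit number is split into a word index and a bit offset.
 * E.g. bit 96 selects data[96 >> 6] == data[1] at offset 96 & 0x3f == 32.
 */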
static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
{
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	return __set_dev_entry_bit(dev_table, devid, bit);
}

static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
			       u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}

static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
{
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	return __get_dev_entry_bit(dev_table, devid, bit);
}
static bool __copy_device_table(struct amd_iommu *iommu)
{
	u64 int_ctl, int_tab_len, entry = 0;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	/* Each IOMMU uses a separate device table with the same size */
	lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
	hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
	entry = (((u64) hi) << 32) + lo;
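	/*
	 * Illustrative note: the low 12 bits of the base register hold the
	 * table size in 4K pages minus one, so e.g. a size field of 0x1ff
	 * decodes to (0x1ff + 1) << 12 == 2 MiB below.
	 */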
	old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
	if (old_devtb_size != pci_seg->dev_table_size) {
		pr_err("The device table size of IOMMU:%d is not expected!\n",
		       iommu->index);
		return false;
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask); we must remove the memory
	 * encryption mask to obtain the true physical address in the kdump
	 * kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							pci_seg->dev_table_size)
		    : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
	if (!old_devtb)
		return false;
	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
							    get_order(pci_seg->dev_table_size));
	if (pci_seg->old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		memunmap(old_devtb);
		return false;
	}

	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
		pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_INTTABLEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				memunmap(old_devtb);
				return false;
			}

			pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}
static bool copy_device_table(void)
{
	struct amd_iommu *iommu;
	struct amd_iommu_pci_seg *pci_seg;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");

	/*
	 * All IOMMUs within a PCI segment share a common device table.
	 * Hence copy the device table only once per PCI segment.
	 */
	for_each_pci_segment(pci_seg) {
		for_each_iommu(iommu) {
			if (pci_seg->id != iommu->pci_seg->id)
				continue;
			if (!__copy_device_table(iommu))
				return false;
			break;
		}
	}

	return true;
}
void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
}
/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(iommu, devid);

	amd_iommu_set_rlookup_table(iommu, devid);
}
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->devid = *devid;
	entry->cmd_line = cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}
static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line = cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);

	return 0;
}
static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	u32 ivhd_size;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	amd_iommu_apply_ivrs_quirks();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		seg_id = pci_seg->id;
1388 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
1390 for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
1391 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1393 case IVHD_DEV_SELECT:
1395 DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
1397 seg_id, PCI_BUS_NUM(e->devid),
1403 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1405 case IVHD_DEV_SELECT_RANGE_START:
1407 DUMP_printk(" DEV_SELECT_RANGE_START\t "
1408 "devid: %04x:%02x:%02x.%x flags: %02x\n",
1409 seg_id, PCI_BUS_NUM(e->devid),
1414 devid_start = e->devid;
1419 case IVHD_DEV_ALIAS:
1421 DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
1422 "flags: %02x devid_to: %02x:%02x.%x\n",
1423 seg_id, PCI_BUS_NUM(e->devid),
1427 PCI_BUS_NUM(e->ext >> 8),
1428 PCI_SLOT(e->ext >> 8),
1429 PCI_FUNC(e->ext >> 8));
1432 devid_to = e->ext >> 8;
1433 set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
1434 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1435 pci_seg->alias_table[devid] = devid_to;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk(" DEV_ALIAS_RANGE\t\t "
				    "devid: %04x:%02x:%02x.%x flags: %02x "
				    "devid_to: %04x:%02x:%02x.%x\n",
				    seg_id, PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    seg_id, PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    seg_id, PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
				    "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
				    seg_id, PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
				    seg_id, PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					pci_seg->alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u32 devid;
			int ret;

			handle = e->ext & 0xff;
			devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
			type = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
				    var, (int)handle,
				    seg_id, PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u32 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
			memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
			DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
				    hid, uid, seg_id,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}
/* Allocate PCI segment data structure */
static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
					struct acpi_table_header *ivrs_base)
{
	struct amd_iommu_pci_seg *pci_seg;
	int last_bdf;

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func we need to
	 * handle in this PCI segment. Based on this information the shared data
	 * structures for the PCI segments in the system will be allocated.
	 */
	last_bdf = find_last_devid_acpi(ivrs_base, id);
	if (last_bdf < 0)
		return NULL;

	pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
	if (pci_seg == NULL)
		return NULL;

	pci_seg->last_bdf = last_bdf;
	DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
	pci_seg->dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
	pci_seg->alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
	pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);

	pci_seg->id = id;
	init_llist_head(&pci_seg->dev_data_list);
	INIT_LIST_HEAD(&pci_seg->unity_map);
	list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);

	if (alloc_dev_table(pci_seg))
		return NULL;
	if (alloc_alias_table(pci_seg))
		return NULL;
	if (alloc_rlookup_table(pci_seg))
		return NULL;

	return pci_seg;
}
static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
					struct acpi_table_header *ivrs_base)
{
	struct amd_iommu_pci_seg *pci_seg;

	for_each_pci_segment(pci_seg) {
		if (pci_seg->id == id)
			return pci_seg;
	}

	return alloc_pci_segment(id, ivrs_base);
}

static void __init free_pci_segments(void)
{
	struct amd_iommu_pci_seg *pci_seg, *next;

	for_each_pci_segment_safe(pci_seg, next) {
		list_del(&pci_seg->list);
		free_irq_lookup_table(pci_seg);
		free_rlookup_table(pci_seg);
		free_alias_table(pci_seg);
		free_dev_table(pci_seg);
		kfree(pci_seg);
	}
}
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_cwwb_sem(iommu);
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}
/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}
/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
				 struct acpi_table_header *ivrs_base)
{
	struct amd_iommu_pci_seg *pci_seg;

	pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
	if (pci_seg == NULL)
		return -ENOMEM;
	iommu->pci_seg = pci_seg;

	raw_spin_lock_init(&iommu->lock);
	atomic64_set(&iommu->cmd_sem_val, 0);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling it.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * XT and GAM also require GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

		early_iommu_features_init(iommu, h);

		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	return init_iommu_from_acpi(iommu, h);
}
static int __init init_iommu_one_late(struct amd_iommu *iommu)
{
	int ret;

	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	if (amd_iommu_irq_remap) {
		ret = amd_iommu_create_irq_domain(iommu);
		if (ret)
			return ret;
	}

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	iommu->pci_seg->rlookup_table[iommu->devid] = NULL;

	return 0;
}
/*
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks and returns the highest
 * supported IVHD type (up to ACPI_IVHD_TYPE_MAX_SUPPORTED).
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
				   (base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}
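
/*
 * Illustrative example: if the IVRS table carries IVHD blocks of types
 * 0x10, 0x11 and 0x40 for the same IOMMU, the walk above settles on 0x40,
 * the richest format this driver supports (ACPI_IVHD_TYPE_MAX_SUPPORTED).
 */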
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	/* Phase 1: Process all IVHD blocks */
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
				    "flags: %01x info %04x\n",
				    h->pci_seg, PCI_BUS_NUM(h->devid),
				    PCI_SLOT(h->devid), PCI_FUNC(h->devid),
				    h->cap_ptr, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h, table);
			if (ret)
				return ret;
		}
		p += h->length;
	}

	/* Phase 2: Early feature support check */
	get_global_efr();

	/* Phase 3: Enabling IOMMU features */
	for_each_iommu(iommu) {
		ret = init_iommu_one_late(iommu);
		if (ret)
			return ret;
	}

	return 0;
}
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val;
	struct pci_dev *pdev = iommu->dev;

	if (!check_feature(FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	pci_info(pdev, "IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);

	return sysfs_emit(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};
/*
 * Note: IVHD 0x11 and 0x40 also contain an exact copy
 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
 */
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
	u64 features, features2;

	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
		return;

	/* read extended feature bits */
	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
	features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);

	if (!amd_iommu_efr) {
		amd_iommu_efr = features;
		amd_iommu_efr2 = features2;
		return;
	}

	/*
	 * Sanity check and warn if EFR values from
	 * IVHD and MMIO conflict.
	 */
	if (features != amd_iommu_efr ||
	    features2 != amd_iommu_efr2) {
		pr_warn(FW_WARN
			"EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
			features, amd_iommu_efr,
			features2, amd_iommu_efr2);
	}
}
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
						 PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	/* ACPI _PRT won't have an IRQ for IOMMU */
	iommu->dev->irq_managed = 1;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	late_iommu_features_init(iommu);

	if (check_feature(FEATURE_GT)) {
		int glxval;
		u64 pasmax;

		pasmax = amd_iommu_efr & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
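		/*
		 * Illustrative note: PASmax is encoded as a power-of-two
		 * exponent, so e.g. a field value of 15 yields
		 * (1 << 16) - 1 == 65535 addressable PASIDs.
		 */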
		iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1;

		BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK);

		glxval = amd_iommu_efr & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;
	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
		pr_info("Using strict mode due to virtualization\n");
		iommu_set_dma_strict();
		amd_iommu_np_cache = true;
	}

	init_iommu_perf_ctr(iommu);

	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
		if (!check_feature(FEATURE_GIOSUP) ||
		    !check_feature(FEATURE_GT)) {
			pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
			amd_iommu_pgtable = AMD_IOMMU_V1;
		}
	}
	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(iommu->pci_seg->id,
						    iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
				     amd_iommu_groups, "ivhd%d", iommu->index);
	if (ret)
		return ret;

	iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);

	return pci_enable_device(iommu->dev);
}
static void print_iommu_info(void)
{
	int i;
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};

	if (amd_iommu_efr) {
		pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2);

		for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
			if (check_feature(1ULL << i))
				pr_cont(" %s", feat_str[i]);
		}

		if (check_feature(FEATURE_GAM_VAPIC))
			pr_cont(" GA_vAPIC");

		if (check_feature(FEATURE_SNP))
			pr_cont(" SNP");

		pr_cont("\n");
	}

	if (irq_remapping_enabled) {
		pr_info("Interrupt remapping enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("X2APIC enabled\n");
	}
	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
		pr_info("V2 page table enabled (Paging mode : %d level)\n",
			amd_iommu_gpt_level);
	}
}
static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	struct amd_iommu_pci_seg *pci_seg;
	int ret;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret) {
			pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
			       iommu->index, ret);
			goto out;
		}
		/* Need to setup range after PCI init */
		iommu_set_cwwb_range(iommu);
	}

	/*
	 * Order is important here to make sure any unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the iommu_init_pci() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	for_each_pci_segment(pci_seg)
		init_device_table_dma(pci_seg);

	for_each_iommu(iommu)
		amd_iommu_flush_all_caches(iommu);

	print_iommu_info();

out:
	return ret;
}
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	return 0;
}

union intcapxt {
	u64 capxt;
	struct {
		u64 reserved_0		:  2,
		    dest_mode_logical	:  1,
		    reserved_1		:  5,
		    destid_0_23		: 24,
		    vector		:  8,
		    reserved_2		: 16,
		    destid_24_31	:  8;
	};
} __attribute__ ((packed));
static struct irq_chip intcapxt_controller;

static int intcapxt_irqdomain_activate(struct irq_domain *domain,
				       struct irq_data *irqd, bool reserve)
{
	return 0;
}

static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
					  struct irq_data *irqd)
{
}

static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	int i, ret;

	if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	for (i = virq; i < virq + nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);

		irqd->chip = &intcapxt_controller;
		irqd->hwirq = info->hwirq;
		irqd->chip_data = info->data;
		__irq_set_handler(i, handle_edge_irq, 0, "edge");
	}

	return 0;
}

static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs)
{
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
static void intcapxt_unmask_irq(struct irq_data *irqd)
{
	struct amd_iommu *iommu = irqd->chip_data;
	struct irq_cfg *cfg = irqd_cfg(irqd);
	union intcapxt xt;

	xt.capxt = 0ULL;
	xt.dest_mode_logical = apic->dest_mode_logical;
	xt.vector = cfg->vector;
	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
	xt.destid_24_31 = cfg->dest_apicid >> 24;

	writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
}

static void intcapxt_mask_irq(struct irq_data *irqd)
{
	struct amd_iommu *iommu = irqd->chip_data;

	writeq(0, iommu->mmio_base + irqd->hwirq);
}
static int intcapxt_set_affinity(struct irq_data *irqd,
				 const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irqd->parent_data;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;
	return 0;
}

static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
{
	return on ? -EOPNOTSUPP : 0;
}
static struct irq_chip intcapxt_controller = {
	.name			= "IOMMU-MSI",
	.irq_unmask		= intcapxt_unmask_irq,
	.irq_mask		= intcapxt_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_affinity	= intcapxt_set_affinity,
	.irq_set_wake		= intcapxt_set_wake,
	.flags			= IRQCHIP_MASK_ON_SUSPEND,
};

static const struct irq_domain_ops intcapxt_domain_ops = {
	.alloc		= intcapxt_irqdomain_alloc,
	.free		= intcapxt_irqdomain_free,
	.activate	= intcapxt_irqdomain_activate,
	.deactivate	= intcapxt_irqdomain_deactivate,
};

static struct irq_domain *iommu_irqdomain;
static struct irq_domain *iommu_get_irqdomain(void)
{
	struct fwnode_handle *fn;

	/* No need for locking here (yet) as the init is single-threaded */
	if (iommu_irqdomain)
		return iommu_irqdomain;

	fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
	if (!fn)
		return NULL;

	iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
						      fn, &intcapxt_domain_ops,
						      NULL);
	if (!iommu_irqdomain)
		irq_domain_free_fwnode(fn);

	return iommu_irqdomain;
}
static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
				  int hwirq, irq_handler_t thread_fn)
{
	struct irq_domain *domain;
	struct irq_alloc_info info;
	int irq, ret;
	int node = dev_to_node(&iommu->dev->dev);

	domain = iommu_get_irqdomain();
	if (!domain)
		return -ENXIO;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
	info.data = iommu;
	info.hwirq = hwirq;

	irq = irq_domain_alloc_irqs(domain, 1, node, &info);
	if (irq < 0) {
		irq_domain_remove(domain);
		return irq;
	}

	ret = request_threaded_irq(irq, amd_iommu_int_handler,
				   thread_fn, 0, devname, iommu);
	if (ret) {
		irq_domain_free_irqs(irq, 1);
		irq_domain_remove(domain);
		return ret;
	}

	return 0;
}
2445 static int iommu_setup_intcapxt(struct amd_iommu *iommu)
2449 snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
2450 "AMD-Vi%d-Evt", iommu->index);
2451 ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
2452 MMIO_INTCAPXT_EVT_OFFSET,
2453 amd_iommu_int_thread_evtlog);
2457 snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
2458 "AMD-Vi%d-PPR", iommu->index);
2459 ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
2460 MMIO_INTCAPXT_PPR_OFFSET,
2461 amd_iommu_int_thread_pprlog);
2465 #ifdef CONFIG_IRQ_REMAP
2466 snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
2467 "AMD-Vi%d-GA", iommu->index);
2468 ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
2469 MMIO_INTCAPXT_GALOG_OFFSET,
2470 amd_iommu_int_thread_galog);
2476 static int iommu_init_irq(struct amd_iommu *iommu)
2480 if (iommu->int_enabled)
2483 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2484 ret = iommu_setup_intcapxt(iommu);
2485 else if (iommu->dev->msi_cap)
2486 ret = iommu_setup_msi(iommu);
2493 iommu->int_enabled = true;
2496 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2497 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2499 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2504 /****************************************************************************
2506 * The next functions belong to the third pass of parsing the ACPI
2507 * table. In this last pass the memory mapping requirements are
2508 * gathered (like exclusion and unity mapping ranges).
2510 ****************************************************************************/
2512 static void __init free_unity_maps(void)
2514 struct unity_map_entry *entry, *next;
2515 struct amd_iommu_pci_seg *p, *pci_seg;
2517 for_each_pci_segment_safe(pci_seg, p) {
2518 list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
2519 list_del(&entry->list);
2525 /* called for unity map ACPI definition */
2526 static int __init init_unity_map_range(struct ivmd_header *m,
2527 struct acpi_table_header *ivrs_base)
2529 struct unity_map_entry *e = NULL;
2530 struct amd_iommu_pci_seg *pci_seg;
2533 pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
2534 if (pci_seg == NULL)
2537 e = kzalloc(sizeof(*e), GFP_KERNEL);
2545 case ACPI_IVMD_TYPE:
2546 s = "IVMD_TYPEi\t\t\t";
2547 e->devid_start = e->devid_end = m->devid;
2549 case ACPI_IVMD_TYPE_ALL:
2550 s = "IVMD_TYPE_ALL\t\t";
2552 e->devid_end = pci_seg->last_bdf;
2554 case ACPI_IVMD_TYPE_RANGE:
2555 s = "IVMD_TYPE_RANGE\t\t";
2556 e->devid_start = m->devid;
2557 e->devid_end = m->aux;
2560 e->address_start = PAGE_ALIGN(m->range_start);
2561 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2562 e->prot = m->flags >> 1;
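/*
 * Sketch of the flag layout this shift assumes: IVMD_FLAG_IR (0x02) and
 * IVMD_FLAG_IW (0x04) sit at bits 1 and 2 of m->flags, so shifting right
 * by one leaves e->prot with IR in bit 0 and IW in bit 1, presumably
 * matching the IOMMU_PROT_IR/IOMMU_PROT_IW encoding used elsewhere in
 * the driver.
 */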
2565 * Treat per-device exclusion ranges as r/w unity-mapped regions
2566 * since some buggy BIOSes overwrite the exclusion range
2567 * (the exclusion_start and exclusion_length members). This
2568 * happens when multiple exclusion ranges (IVMD entries) are
2569 * defined in the ACPI table.
2571 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2572 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
2574 DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: %04x:%02x:%02x.%x range_start: %016llx range_end: %016llx flags: %x\n",
2576 s, m->pci_seg,
2577 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2578 PCI_FUNC(e->devid_start), m->pci_seg,
2579 PCI_BUS_NUM(e->devid_end),
2580 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2581 e->address_start, e->address_end, m->flags);
2583 list_add_tail(&e->list, &pci_seg->unity_map);
2588 /* iterates over all memory definitions we find in the ACPI table */
2589 static int __init init_memory_definitions(struct acpi_table_header *table)
2591 u8 *p = (u8 *)table, *end = (u8 *)table;
2592 struct ivmd_header *m;
2594 end += table->length;
2595 p += IVRS_HEADER_LENGTH;
2598 m = (struct ivmd_header *)p;
2599 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2600 init_unity_map_range(m, table);
2609 * Initialize the device table so that devices are not allowed DMA access
2611 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
2614 struct dev_table_entry *dev_table = pci_seg->dev_table;
2616 if (dev_table == NULL)
2619 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
2620 __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
2621 if (!amd_iommu_snp_en)
2622 __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
2626 static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
2629 struct dev_table_entry *dev_table = pci_seg->dev_table;
2631 if (dev_table == NULL)
2634 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
2635 dev_table[devid].data[0] = 0ULL;
2636 dev_table[devid].data[1] = 0ULL;
2640 static void init_device_table(void)
2642 struct amd_iommu_pci_seg *pci_seg;
2645 if (!amd_iommu_irq_remap)
2648 for_each_pci_segment(pci_seg) {
2649 for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
2650 __set_dev_entry_bit(pci_seg->dev_table,
2651 devid, DEV_ENTRY_IRQ_TBL_EN);
2655 static void iommu_init_flags(struct amd_iommu *iommu)
2657 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2658 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2659 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2661 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2662 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2663 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2665 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2666 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2667 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2669 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2670 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2671 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2674 * make IOMMU memory accesses cache coherent
2676 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2678 /* Set IOTLB invalidation timeout to 1s */
2679 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2682 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2685 u32 ioc_feature_control;
2686 struct pci_dev *pdev = iommu->root_pdev;
2688 /* RD890 BIOSes may not have completely reconfigured the iommu */
2689 if (!is_rd890_iommu(iommu->dev) || !pdev)
2693 * First, we need to ensure that the iommu is enabled. This is
2694 * controlled by a register in the northbridge.
2697 /* Select Northbridge indirect register 0x75 and enable writing */
2698 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2699 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
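/*
 * The 0x60/0x64 pair acts as index/data registers for northbridge
 * indirect accesses: the value written to 0x60 selects the indirect
 * register (0x75 here) and bit 7 arms it for writing, after which
 * reads and writes to 0x64 access the selected register.
 */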
2701 /* Enable the iommu */
2702 if (!(ioc_feature_control & 0x1))
2703 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2705 /* Restore the iommu BAR */
2706 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2707 iommu->stored_addr_lo);
2708 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2709 iommu->stored_addr_hi);
2711 /* Restore the l1 indirect regs for each of the 6 l1s */
2712 for (i = 0; i < 6; i++)
2713 for (j = 0; j < 0x12; j++)
2714 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2716 /* Restore the l2 indirect regs */
2717 for (i = 0; i < 0x83; i++)
2718 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2720 /* Lock PCI setup registers */
2721 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2722 iommu->stored_addr_lo | 1);
2725 static void iommu_enable_ga(struct amd_iommu *iommu)
2727 #ifdef CONFIG_IRQ_REMAP
2728 switch (amd_iommu_guest_ir) {
2729 case AMD_IOMMU_GUEST_IR_VAPIC:
2730 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2731 iommu_feature_enable(iommu, CONTROL_GA_EN);
2732 iommu->irte_ops = &irte_128_ops;
2735 iommu->irte_ops = &irte_32_ops;
2741 static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
2743 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
2746 static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
2750 if (!amd_iommu_irtcachedis)
2755 * Support for the IRTCacheDis feature is determined by
2756 * checking whether the bit is writable.
2758 iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
2759 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
2760 ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
2762 iommu->irtcachedis_enabled = true;
2763 pr_info("iommu%d (%#06x) : IRT cache is %s\n",
2764 iommu->index, iommu->devid,
2765 iommu->irtcachedis_enabled ? "disabled" : "enabled");
2768 static void early_enable_iommu(struct amd_iommu *iommu)
2770 iommu_disable(iommu);
2771 iommu_init_flags(iommu);
2772 iommu_set_device_table(iommu);
2773 iommu_enable_command_buffer(iommu);
2774 iommu_enable_event_buffer(iommu);
2775 iommu_set_exclusion_range(iommu);
2776 iommu_enable_gt(iommu);
2777 iommu_enable_ga(iommu);
2778 iommu_enable_xt(iommu);
2779 iommu_enable_irtcachedis(iommu);
2780 iommu_enable(iommu);
2781 amd_iommu_flush_all_caches(iommu);
2785 * This function finally enables all IOMMUs found in the system after
2786 * they have been initialized.
2788 * Or, if we are in a kdump kernel with all IOMMUs pre-enabled, it tries
2789 * to copy the old contents of the device table entries. If that is not
2790 * the case, or the copy fails, it just continues as a normal kernel does.
2792 static void early_enable_iommus(void)
2794 struct amd_iommu *iommu;
2795 struct amd_iommu_pci_seg *pci_seg;
2797 if (!copy_device_table()) {
2799 * If we get here because copying the device table from the old
2800 * kernel failed (with all IOMMUs pre-enabled), print an error
2801 * message and free the allocated old_dev_tbl_cpy.
2803 if (amd_iommu_pre_enabled)
2804 pr_err("Failed to copy DEV table from previous kernel.\n");
2806 for_each_pci_segment(pci_seg) {
2807 if (pci_seg->old_dev_tbl_cpy != NULL) {
2808 free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
2809 get_order(pci_seg->dev_table_size));
2810 pci_seg->old_dev_tbl_cpy = NULL;
2814 for_each_iommu(iommu) {
2815 clear_translation_pre_enabled(iommu);
2816 early_enable_iommu(iommu);
2819 pr_info("Copied DEV table from previous kernel.\n");
2821 for_each_pci_segment(pci_seg) {
2822 free_pages((unsigned long)pci_seg->dev_table,
2823 get_order(pci_seg->dev_table_size));
2824 pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
2827 for_each_iommu(iommu) {
2828 iommu_disable_command_buffer(iommu);
2829 iommu_disable_event_buffer(iommu);
2830 iommu_disable_irtcachedis(iommu);
2831 iommu_enable_command_buffer(iommu);
2832 iommu_enable_event_buffer(iommu);
2833 iommu_enable_gt(iommu);
2834 iommu_enable_ga(iommu);
2835 iommu_enable_xt(iommu);
2836 iommu_enable_irtcachedis(iommu);
2837 iommu_set_device_table(iommu);
2838 amd_iommu_flush_all_caches(iommu);
2843 static void enable_iommus_v2(void)
2845 struct amd_iommu *iommu;
2847 for_each_iommu(iommu)
2848 iommu_enable_ppr_log(iommu);
2851 static void enable_iommus_vapic(void)
2853 #ifdef CONFIG_IRQ_REMAP
2855 struct amd_iommu *iommu;
2857 for_each_iommu(iommu) {
2859 * Disable GALog if already running. It could have been enabled
2860 * in the previous boot before kdump.
2862 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2863 if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2866 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
2867 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
2870 * We need to poll the GALogRun bit until it reads zero before
2871 * we can safely modify the GA Log registers.
2873 for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
2874 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2875 if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2880 if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
2884 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2885 !check_feature(FEATURE_GAM_VAPIC)) {
2886 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2890 if (amd_iommu_snp_en &&
2891 !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
2892 pr_warn("Force to disable Virtual APIC due to SNP\n");
2893 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2897 /* Enable GAM and SNPAVIC support */
2898 for_each_iommu(iommu) {
2899 if (iommu_init_ga_log(iommu) ||
2900 iommu_ga_log_enable(iommu))
2903 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2904 if (amd_iommu_snp_en)
2905 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
2908 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2909 pr_info("Virtual APIC enabled\n");
2913 static void enable_iommus(void)
2915 early_enable_iommus();
2918 static void disable_iommus(void)
2920 struct amd_iommu *iommu;
2922 for_each_iommu(iommu)
2923 iommu_disable(iommu);
2925 #ifdef CONFIG_IRQ_REMAP
2926 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2927 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2932 * Suspend/Resume support
2933 * disable suspend until a real resume is implemented
2936 static void amd_iommu_resume(void)
2938 struct amd_iommu *iommu;
2940 for_each_iommu(iommu)
2941 iommu_apply_resume_quirks(iommu);
2943 /* re-load the hardware */
2946 amd_iommu_enable_interrupts();
2949 static int amd_iommu_suspend(void)
2951 /* disable IOMMUs so they are out of the way of the BIOS */
2957 static struct syscore_ops amd_iommu_syscore_ops = {
2958 .suspend = amd_iommu_suspend,
2959 .resume = amd_iommu_resume,
2962 static void __init free_iommu_resources(void)
2964 kmem_cache_destroy(amd_iommu_irq_cache);
2965 amd_iommu_irq_cache = NULL;
2968 free_pci_segments();
2971 /* SB IOAPIC is always on this device in AMD systems */
2972 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
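/*
 * Expanded: PCI_DEVFN(0x14, 0) == (0x14 << 3) | 0 == 0xa0, so
 * IOAPIC_SB_DEVID is 0x00a0, i.e. bus 0, device 0x14, function 0,
 * typically the FCH device hosting the southbridge IOAPIC on AMD systems.
 */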
2974 static bool __init check_ioapic_information(void)
2976 const char *fw_bug = FW_BUG;
2977 bool ret, has_sb_ioapic;
2980 has_sb_ioapic = false;
2984 * If we have map overrides on the kernel command line the
2985 * messages in this function might not describe firmware bugs
2986 * anymore - so be careful
2991 for (idx = 0; idx < nr_ioapics; idx++) {
2992 int devid, id = mpc_ioapic_id(idx);
2994 devid = get_ioapic_devid(id);
2996 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
2999 } else if (devid == IOAPIC_SB_DEVID) {
3000 has_sb_ioapic = true;
3005 if (!has_sb_ioapic) {
3007 * We expect the SB IOAPIC to be listed in the IVRS
3008 * table. The system timer is connected to the SB IOAPIC
3009 * and if we don't have it in the list the system will
3010 * panic at boot time. This situation usually happens
3011 * when the BIOS is buggy and provides us the wrong
3012 * device id for the IOAPIC in the system.
3014 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
3018 pr_err("Disabling interrupt remapping\n");
3023 static void __init free_dma_resources(void)
3025 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
3026 get_order(MAX_DOMAIN_ID/8));
3027 amd_iommu_pd_alloc_bitmap = NULL;
3032 static void __init ivinfo_init(void *ivrs)
3034 amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
3038 * This is the hardware init function for AMD IOMMU in the system.
3039 * This function is called either from amd_iommu_init or from the interrupt
3040 * remapping setup code.
3042 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
3045 * Pass 1) Discover the most comprehensive IVHD type to use.
3047 * Pass 2) Find the highest PCI device id the driver has to handle.
3048 * Based on this information, the sizes of the data structures
3049 * that need to be allocated are determined.
3051 * Pass 3) Initialize the data structures just allocated with the
3052 * information in the ACPI table about the available AMD IOMMUs
3053 * in the system. It also maps the PCI devices in the
3054 * system to specific IOMMUs.
3056 * Pass 4) After the basic data structures are allocated and
3057 * initialized, we update them with information about the memory
3058 * remapping requirements parsed out of the ACPI table in
3061 * After everything is set up the IOMMUs are enabled and the necessary
3062 * hotplug and suspend notifiers are registered.
3064 static int __init early_amd_iommu_init(void)
3066 struct acpi_table_header *ivrs_base;
3067 int remap_cache_sz, ret;
3070 if (!amd_iommu_detected)
3073 status = acpi_get_table("IVRS", 0, &ivrs_base);
3074 if (status == AE_NOT_FOUND)
3076 else if (ACPI_FAILURE(status)) {
3077 const char *err = acpi_format_exception(status);
3078 pr_err("IVRS table error: %s\n", err);
3083 * Validate checksum here so we don't need to do it when
3084 * we actually parse the table
3086 ret = check_ivrs_checksum(ivrs_base);
3090 ivinfo_init(ivrs_base);
3092 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
3093 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
3095 /* Device table - directly used by all IOMMUs */
3098 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
3099 GFP_KERNEL | __GFP_ZERO,
3100 get_order(MAX_DOMAIN_ID/8));
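/*
 * Sizing sketch, assuming MAX_DOMAIN_ID is 64K (its value in
 * amd_iommu_types.h at the time of writing): the bitmap needs
 * 65536 / 8 = 8 KiB, so get_order() yields 1 and two pages are
 * allocated.
 */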
3101 if (amd_iommu_pd_alloc_bitmap == NULL)
3105 * never allocate domain 0, because it is used as the non-allocated
3106 * and error-value placeholder
3108 __set_bit(0, amd_iommu_pd_alloc_bitmap);
3111 * Now that the data structures are allocated and basically
3112 * initialized, start the real ACPI table scan.
3114 ret = init_iommu_all(ivrs_base);
3118 /* 5 level guest page table */
3119 if (cpu_feature_enabled(X86_FEATURE_LA57) &&
3120 check_feature_gpt_level() == GUEST_PGTABLE_5_LEVEL)
3121 amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
3123 /* Disable any previously enabled IOMMUs */
3124 if (!is_kdump_kernel() || amd_iommu_disabled)
3127 if (amd_iommu_irq_remap)
3128 amd_iommu_irq_remap = check_ioapic_information();
3130 if (amd_iommu_irq_remap) {
3131 struct amd_iommu_pci_seg *pci_seg;
3133 * Interrupt remapping enabled, create kmem_cache for the
3137 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3138 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
3140 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
3141 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
3143 DTE_INTTAB_ALIGNMENT,
3145 if (!amd_iommu_irq_cache)
3148 for_each_pci_segment(pci_seg) {
3149 if (alloc_irq_lookup_table(pci_seg))
3154 ret = init_memory_definitions(ivrs_base);
3158 /* init the device table */
3159 init_device_table();
3162 /* Don't leak any ACPI memory */
3163 acpi_put_table(ivrs_base);
3168 static int amd_iommu_enable_interrupts(void)
3170 struct amd_iommu *iommu;
3173 for_each_iommu(iommu) {
3174 ret = iommu_init_irq(iommu);
3180 * The interrupt handlers are ready to process interrupts. Enable
3181 * the PPR and GA log interrupts for all IOMMUs.
3183 enable_iommus_vapic();
3190 static bool __init detect_ivrs(void)
3192 struct acpi_table_header *ivrs_base;
3196 status = acpi_get_table("IVRS", 0, &ivrs_base);
3197 if (status == AE_NOT_FOUND)
3199 else if (ACPI_FAILURE(status)) {
3200 const char *err = acpi_format_exception(status);
3201 pr_err("IVRS table error: %s\n", err);
3205 acpi_put_table(ivrs_base);
3207 if (amd_iommu_force_enable)
3210 /* Don't use IOMMU if there is Stoney Ridge graphics */
3211 for (i = 0; i < 32; i++) {
3214 pci_id = read_pci_config(0, i, 0, 0);
3215 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
3216 pr_info("Disable IOMMU on Stoney Ridge\n");
3222 /* Make sure ACS will be enabled during PCI probe */
3228 static void iommu_snp_enable(void)
3230 #ifdef CONFIG_KVM_AMD_SEV
3231 if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
3234 * SNP support requires that the IOMMU be enabled and not
3235 * configured in passthrough mode.
3237 if (no_iommu || iommu_default_passthrough()) {
3238 pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
3242 amd_iommu_snp_en = check_feature(FEATURE_SNP);
3243 if (!amd_iommu_snp_en) {
3244 pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
3248 pr_info("IOMMU SNP support enabled.\n");
3250 /* Enforce IOMMU v1 pagetable when SNP is enabled. */
3251 if (amd_iommu_pgtable != AMD_IOMMU_V1) {
3252 pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP.\n");
3253 amd_iommu_pgtable = AMD_IOMMU_V1;
3258 /****************************************************************************
3260 * AMD IOMMU Initialization State Machine
3262 ****************************************************************************/
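/*
 * Happy-path transitions, as driven by state_next() below:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED
 *     -> IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN
 *     -> IOMMU_INITIALIZED
 *
 * IOMMU_NOT_FOUND, IOMMU_CMDLINE_DISABLED and IOMMU_INIT_ERROR are
 * terminal: iommu_go_to_state() stops iterating once one is reached.
 */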
3264 static int __init state_next(void)
3268 switch (init_state) {
3269 case IOMMU_START_STATE:
3270 if (!detect_ivrs()) {
3271 init_state = IOMMU_NOT_FOUND;
3274 init_state = IOMMU_IVRS_DETECTED;
3277 case IOMMU_IVRS_DETECTED:
3278 if (amd_iommu_disabled) {
3279 init_state = IOMMU_CMDLINE_DISABLED;
3282 ret = early_amd_iommu_init();
3283 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
3286 case IOMMU_ACPI_FINISHED:
3287 early_enable_iommus();
3288 x86_platform.iommu_shutdown = disable_iommus;
3289 init_state = IOMMU_ENABLED;
3292 register_syscore_ops(&amd_iommu_syscore_ops);
3294 ret = amd_iommu_init_pci();
3295 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
3297 case IOMMU_PCI_INIT:
3298 ret = amd_iommu_enable_interrupts();
3299 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
3301 case IOMMU_INTERRUPTS_EN:
3302 init_state = IOMMU_INITIALIZED;
3304 case IOMMU_INITIALIZED:
3307 case IOMMU_NOT_FOUND:
3308 case IOMMU_INIT_ERROR:
3309 case IOMMU_CMDLINE_DISABLED:
3310 /* Error states => do nothing */
3319 free_dma_resources();
3320 if (!irq_remapping_enabled) {
3322 free_iommu_resources();
3324 struct amd_iommu *iommu;
3325 struct amd_iommu_pci_seg *pci_seg;
3327 for_each_pci_segment(pci_seg)
3328 uninit_device_table_dma(pci_seg);
3330 for_each_iommu(iommu)
3331 amd_iommu_flush_all_caches(iommu);
3337 static int __init iommu_go_to_state(enum iommu_init_state state)
3341 while (init_state != state) {
3342 if (init_state == IOMMU_NOT_FOUND ||
3343 init_state == IOMMU_INIT_ERROR ||
3344 init_state == IOMMU_CMDLINE_DISABLED)
3352 #ifdef CONFIG_IRQ_REMAP
3353 int __init amd_iommu_prepare(void)
3357 amd_iommu_irq_remap = true;
3359 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
3361 amd_iommu_irq_remap = false;
3365 return amd_iommu_irq_remap ? 0 : -ENODEV;
3368 int __init amd_iommu_enable(void)
3372 ret = iommu_go_to_state(IOMMU_ENABLED);
3376 irq_remapping_enabled = 1;
3377 return amd_iommu_xt_mode;
3380 void amd_iommu_disable(void)
3382 amd_iommu_suspend();
3385 int amd_iommu_reenable(int mode)
3392 int __init amd_iommu_enable_faulting(void)
3394 /* We enable MSI later when PCI is initialized */
3400 * This is the core init function for AMD IOMMU hardware in the system.
3401 * This function is called from the generic x86 DMA layer initialization
3404 static int __init amd_iommu_init(void)
3406 struct amd_iommu *iommu;
3409 ret = iommu_go_to_state(IOMMU_INITIALIZED);
3410 #ifdef CONFIG_GART_IOMMU
3411 if (ret && list_empty(&amd_iommu_list)) {
3413 * We failed to initialize the AMD IOMMU - try fallback
3414 * to GART if possible.
3420 for_each_iommu(iommu)
3421 amd_iommu_debugfs_setup(iommu);
3426 static bool amd_iommu_sme_check(void)
3428 if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
3429 (boot_cpu_data.x86 != 0x17))
3432 /* For Fam17h, a specific level of support is required */
3433 if (boot_cpu_data.microcode >= 0x08001205)
3436 if ((boot_cpu_data.microcode >= 0x08001126) &&
3437 (boot_cpu_data.microcode <= 0x080011ff))
3440 pr_notice("IOMMU not currently supported when SME is active\n");
3445 /****************************************************************************
3447 * Early detect code. This code runs at IOMMU detection time in the DMA
3448 * layer. It just checks whether there is an IVRS ACPI table to detect AMD
3451 ****************************************************************************/
3452 int __init amd_iommu_detect(void)
3456 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
3459 if (!amd_iommu_sme_check())
3462 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
3466 amd_iommu_detected = true;
3468 x86_init.iommu.iommu_init = amd_iommu_init;
3473 /****************************************************************************
3475 * Parsing functions for the AMD IOMMU specific kernel command line
3478 ****************************************************************************/
3480 static int __init parse_amd_iommu_dump(char *str)
3482 amd_iommu_dump = true;
3487 static int __init parse_amd_iommu_intr(char *str)
3489 for (; *str; ++str) {
3490 if (strncmp(str, "legacy", 6) == 0) {
3491 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3494 if (strncmp(str, "vapic", 5) == 0) {
3495 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
3502 static int __init parse_amd_iommu_options(char *str)
3508 if (strncmp(str, "fullflush", 9) == 0) {
3509 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
3510 iommu_set_dma_strict();
3511 } else if (strncmp(str, "force_enable", 12) == 0) {
3512 amd_iommu_force_enable = true;
3513 } else if (strncmp(str, "off", 3) == 0) {
3514 amd_iommu_disabled = true;
3515 } else if (strncmp(str, "force_isolation", 15) == 0) {
3516 amd_iommu_force_isolation = true;
3517 } else if (strncmp(str, "pgtbl_v1", 8) == 0) {
3518 amd_iommu_pgtable = AMD_IOMMU_V1;
3519 } else if (strncmp(str, "pgtbl_v2", 8) == 0) {
3520 amd_iommu_pgtable = AMD_IOMMU_V2;
3521 } else if (strncmp(str, "irtcachedis", 11) == 0) {
3522 amd_iommu_irtcachedis = true;
3524 pr_notice("Unknown option - '%s'\n", str);
3527 str += strcspn(str, ",");
3535 static int __init parse_ivrs_ioapic(char *str)
3537 u32 seg = 0, bus, dev, fn;
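/*
 * Accepted format (hypothetical values): ivrs_ioapic=32@0000:00:14.0
 * maps IOAPIC id 32 to the device at segment 0000, bus 00, slot 0x14,
 * function 0.
 */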
3541 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3542 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
3545 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3546 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
3547 pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
3548 str, id, seg, bus, dev, fn);
3552 pr_err("Invalid command line: ivrs_ioapic%s\n", str);
3556 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
3557 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
3562 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3564 cmdline_maps = true;
3565 i = early_ioapic_map_size++;
3566 early_ioapic_map[i].id = id;
3567 early_ioapic_map[i].devid = devid;
3568 early_ioapic_map[i].cmd_line = true;
3573 static int __init parse_ivrs_hpet(char *str)
3575 u32 seg = 0, bus, dev, fn;
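/*
 * Accepted format (hypothetical values): ivrs_hpet=0@0000:00:14.0
 * maps HPET id 0 to the device at segment 0000, bus 00, slot 0x14,
 * function 0.
 */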
3579 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3580 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
3583 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3584 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
3585 pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
3586 str, id, seg, bus, dev, fn);
3590 pr_err("Invalid command line: ivrs_hpet%s\n", str);
3594 if (early_hpet_map_size == EARLY_MAP_SIZE) {
3595 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
3600 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3602 cmdline_maps = true;
3603 i = early_hpet_map_size++;
3604 early_hpet_map[i].id = id;
3605 early_hpet_map[i].devid = devid;
3606 early_hpet_map[i].cmd_line = true;
3611 #define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
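/*
 * Accepted format (hypothetical values):
 * ivrs_acpihid=AMDI0020:0@0000:00:13.1 maps the ACPI device with HID
 * "AMDI0020" and UID "0" to the PCI device at segment 0000, bus 00,
 * slot 0x13, function 1.
 */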
3613 static int __init parse_ivrs_acpihid(char *str)
3615 u32 seg = 0, bus, dev, fn;
3616 char *hid, *uid, *p, *addr;
3617 char acpiid[ACPIID_LEN] = {0};
3620 addr = strchr(str, '@');
3622 addr = strchr(str, '=');
3628 if (strlen(addr) > ACPIID_LEN)
3631 if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
3632 sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
3633 pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
3634 str, acpiid, seg, bus, dev, fn);
3640 /* We have the '@', make it the terminator to get just the acpiid */
3643 if (strlen(str) > ACPIID_LEN + 1)
3646 if (sscanf(str, "=%s", acpiid) != 1)
3649 if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
3650 sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
3654 pr_err("Invalid command line: ivrs_acpihid%s\n", str);
3659 hid = strsep(&p, ":");
3662 if (!hid || !(*hid) || !uid) {
3663 pr_err("Invalid command line: hid or uid\n");
3668 * Ignore leading zeroes after ':', so e.g., AMDI0095:00
3669 * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
3671 while (*uid == '0' && *(uid + 1))
3674 i = early_acpihid_map_size++;
3675 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3676 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3677 early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3678 early_acpihid_map[i].cmd_line = true;
3683 __setup("amd_iommu_dump", parse_amd_iommu_dump);
3684 __setup("amd_iommu=", parse_amd_iommu_options);
3685 __setup("amd_iommu_intr=", parse_amd_iommu_intr);
3686 __setup("ivrs_ioapic", parse_ivrs_ioapic);
3687 __setup("ivrs_hpet", parse_ivrs_hpet);
3688 __setup("ivrs_acpihid", parse_ivrs_acpihid);
3690 bool amd_iommu_v2_supported(void)
3692 /* CPU page table size should match IOMMU guest page table size */
3693 if (cpu_feature_enabled(X86_FEATURE_LA57) &&
3694 amd_iommu_gpt_level != PAGE_MODE_5_LEVEL)
3698 * Since DTE[Mode]=0 is prohibited on an SNP-enabled system
3699 * (i.e. EFR[SNPSup]=1), the IOMMUv2 page table cannot be used
3700 * without first setting up an IOMMUv1 page table.
3702 return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en;
3705 struct amd_iommu *get_amd_iommu(unsigned int idx)
3708 struct amd_iommu *iommu;
3710 for_each_iommu(iommu)
3716 /****************************************************************************
3718 * IOMMU EFR Performance Counter support. This code allows access
3719 * to the IOMMU PC functionality.
3721 ****************************************************************************/
3723 u8 amd_iommu_pc_get_max_banks(unsigned int idx)
3725 struct amd_iommu *iommu = get_amd_iommu(idx);
3728 return iommu->max_banks;
3733 bool amd_iommu_pc_supported(void)
3735 return amd_iommu_pc_present;
3738 u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3740 struct amd_iommu *iommu = get_amd_iommu(idx);
3743 return iommu->max_counters;
3748 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3749 u8 fxn, u64 *value, bool is_write)
3754 /* Make sure the IOMMU PC resource is available */
3755 if (!amd_iommu_pc_present)
3758 /* Check for valid iommu and pc register indexing */
3759 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3762 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
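/*
 * Worked example of the offset encoding: bank 0, counter 0, register 0
 * gives (0x40 << 12) == 0x40000, the start of the counter register
 * aperture; bank 1, counter 2, register 8 gives
 * (0x41 << 12) | (2 << 8) | 8 == 0x41208.
 */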
3764 /* Limit the offset to the hw-defined MMIO region aperture */
3765 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3766 (iommu->max_counters << 8) | 0x28);
3767 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3768 (offset > max_offset_lim))
3772 u64 val = *value & GENMASK_ULL(47, 0);
3774 writel((u32)val, iommu->mmio_base + offset);
3775 writel((val >> 32), iommu->mmio_base + offset + 4);
3777 *value = readl(iommu->mmio_base + offset + 4);
3779 *value |= readl(iommu->mmio_base + offset);
3780 *value &= GENMASK_ULL(47, 0);
3786 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3791 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3794 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3799 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3802 #ifdef CONFIG_KVM_AMD_SEV
3803 static int iommu_page_make_shared(void *page)
3805 unsigned long paddr, pfn;
3807 paddr = iommu_virt_to_phys(page);
3808 /* The C-bit may be set in the paddr */
3809 pfn = __sme_clr(paddr) >> PAGE_SHIFT;
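/*
 * On x86-64, PTRS_PER_PMD is 512, so the check below tests whether the
 * pfn is 2M-aligned; only an aligned pfn can be the base of a huge (2M)
 * RMP entry, which would need to be split (PSMASH) to 4K entries before
 * the page can be made shared.
 */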
3811 if (!(pfn % PTRS_PER_PMD)) {
3815 ret = snp_lookup_rmpentry(pfn, &assigned, &level);
3817 pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret);
3822 pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn);
3826 if (level > PG_LEVEL_4K) {
3831 pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n",
3838 return rmp_make_shared(pfn, PG_LEVEL_4K);
3841 static int iommu_make_shared(void *va, size_t size)
3849 for (page = va; page < (va + size); page += PAGE_SIZE) {
3850 ret = iommu_page_make_shared(page);
3858 int amd_iommu_snp_disable(void)
3860 struct amd_iommu *iommu;
3863 if (!amd_iommu_snp_en)
3866 for_each_iommu(iommu) {
3867 ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
3871 ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE);
3875 ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE);
3882 EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);