// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH		48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL		0x20
#define ACPI_IVMD_TYPE			0x21
#define ACPI_IVMD_TYPE_RANGE		0x22

#define IVHD_DEV_ALL			0x01
#define IVHD_DEV_SELECT			0x02
#define IVHD_DEV_SELECT_RANGE_START	0x03
#define IVHD_DEV_RANGE_END		0x04
#define IVHD_DEV_ALIAS			0x42
#define IVHD_DEV_ALIAS_RANGE		0x43
#define IVHD_DEV_EXT_SELECT		0x46
#define IVHD_DEV_EXT_SELECT_RANGE	0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT			0
#define UID_IS_INTEGER			1
#define UID_IS_CHARACTER		2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
#define IVHD_FLAG_PASSPW_EN_MASK	0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
#define IVHD_FLAG_ISOC_EN_MASK		0x08

#define IVMD_FLAG_EXCL_RANGE		0x08
#define IVMD_FLAG_IW			0x04
#define IVMD_FLAG_IR			0x02
#define IVMD_FLAG_UNITY_MAP		0x01

#define ACPI_DEVFLAG_INITPASS		0x01
#define ACPI_DEVFLAG_EXTINT		0x02
#define ACPI_DEVFLAG_NMI		0x04
#define ACPI_DEVFLAG_SYSMGT1		0x10
#define ACPI_DEVFLAG_SYSMGT2		0x20
#define ACPI_DEVFLAG_LINT0		0x40
#define ACPI_DEVFLAG_LINT1		0x80
#define ACPI_DEVFLAG_ATSDIS		0x10000000

#define LOOP_TIMEOUT	100000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
} __attribute__((packed));

bool amd_iommu_irq_remap __read_mostly;

enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the content of the old device table
 * will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_CMDLINE_DISABLED,

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write);

static bool amd_iommu_pre_enabled = true;

static u32 amd_iommu_ivinfo __initdata;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1 << CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
		get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
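
/*
 * Worked example (illustrative, not from the original source): with the
 * worst-case amd_iommu_last_bdf of 0xffff and an assumed 32-byte device
 * table entry, (0xffff + 1) * 32 = 2 MiB; get_order() of that is 9 on 4K
 * pages, so tbl_size() yields 1UL << (12 + 9) = 2 MiB - the table size
 * rounded up to a whole power-of-two number of pages.
 */
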
int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/*
 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
 * Default to IVHD EFR since it is available sooner
 * (i.e. before PCI init).
 */
static void __init early_iommu_features_init(struct amd_iommu *iommu,
					     struct ivhd_header *h)
{
	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
		iommu->features = h->efr_reg;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
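
/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * helpers above implement a classic indirect register protocol - write the
 * target address to an index register (0xf8 for L1, 0xf0 for L2 in PCI
 * config space), then access the data register (0xfc respectively 0xf4).
 * Bit 31 (L1) or bit 8 (L2) of the index value arms a write. For example,
 * the ATS workaround further below reads L2 register 0x47 and sets bit 0:
 *
 *	u32 val = iommu_read_l2(iommu, 0x47);
 *	iommu_write_l2(iommu, 0x47, val | BIT(0));
 */
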
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!iommu_feature(iommu, FEATURE_SNP))
		return;

	/*
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/*
	 * Default to 4 Kbytes, which can be specified by setting base
	 * address equal to the limit address.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}
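
/*
 * Worked example (illustrative): the size is encoded in the low bits of
 * the register as "number of 4K pages minus one", so a 2 MiB device table
 * is programmed as phys | ((2M >> 12) - 1) = phys | 0x1ff.
 */
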
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Cannot reserve memory region %llx-%llx for mmio\n",
		       address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the sizes of the shared
 * data structures are determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}
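
/*
 * Worked example (illustrative): the two top bits of the type encode the
 * length as 0x04 << (type >> 6), so IVHD_DEV_SELECT (0x02) is a 4-byte
 * entry and IVHD_DEV_ALIAS (0x42) an 8-byte one, while variable-length
 * ACPI_HID entries (0xf0) carry the UID length at offset 21 instead.
 */
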
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;
	u32 ivhd_size = get_ivhd_header_size(h);

		pr_err("Unsupported IVHD type %#x\n", h->type);

		dev = (struct ivhd_entry *)p;
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		p += ivhd_entry_length(p);

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}
	return 0;
}
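
/*
 * Sketch of the invariant being verified (an assumption based on the ACPI
 * spec): the table's checksum byte is chosen so that all bytes of the
 * table sum to zero modulo 256, so any non-zero sum after the loop means
 * the IVRS table is corrupt.
 */
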
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;
	end += table->length;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}
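
/*
 * Illustrative note (an assumption, not from the original source):
 * MMIO_CMD_SIZE_512 encodes the ring size in the upper bits of the base
 * register as log2 of the number of 16-byte command entries (512 entries
 * for the 8K buffer), so a single 64-bit write tells the hardware both
 * where the ring lives and where it wraps.
 */
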
/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (buf &&
	    iommu_feature(iommu, FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      EVT_BUFFER_SIZE);

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      PPR_LOG_SIZE);

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/* Note: We have already checked GASup from the IVRS table.
	 * Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

	return ret;
}

static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);

	return iommu->cmd_sem ? 0 : -ENOMEM;
}

static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
	if (iommu->cmd_sem)
		free_page((unsigned long)iommu->cmd_sem);
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
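
/*
 * Worked example (illustrative): device table entry bits are addressed
 * linearly across the four 64-bit words of struct dev_table_entry, so a
 * bit number of 96 lands in data[96 >> 6] = data[1] at bit position
 * 96 & 0x3f = 32.
 */
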
static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
			       iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
			       iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask), so we must remove it to
	 * obtain the true physical address in the kdump kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
						   get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		memunmap(old_devtb);
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_INTTABLEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				memunmap(old_devtb);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->devid = *devid;
	entry->cmd_line = cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line = cmd_line;
	entry->root_devid = (entry->devid & (~0x7));
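
	/*
	 * Example (illustrative only): masking off the three function bits
	 * turns devid 0x0021 (00:04.1) into root_devid 0x0020 (00:04.0),
	 * i.e. the function-0 device of the same slot.
	 */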
	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	amd_iommu_apply_ivrs_quirks();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid    = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid,    e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;
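
			/*
			 * Illustrative sketch: an e->ext value of 0x0100a008
			 * decodes as handle 0x08, devid 0x00a0 (00:14.0) and
			 * type 0x01 (IVHD_SPECIAL_IOAPIC).
			 */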
			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_acpi_hid_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_acpi_hid_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_cwwb_sem(iommu);
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 *
 * BIOS should disable L2B miscellaneous clock gating by setting
 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 *
 * BIOS should enable ATS write permission check by setting
 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}

/*
 * This function glues the initialization functions for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);
	iommu->cmd_sem_val = 0;

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling it.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * XT and GAM also require GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

		early_iommu_features_init(iommu, h);
		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	if (amd_iommu_irq_remap) {
		ret = amd_iommu_create_irq_domain(iommu);
		if (ret)
			return ret;
	}

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHDs and returns the maximum
 * supported IVHD type.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
					(base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;
	}

	return 0;
}

static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	int retry;
	struct pci_dev *pdev = iommu->dev;
	u64 val = 0xabcd, val2 = 0, save_reg, save_src;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* save the value to restore, if writable */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
	    iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
		goto pc_false;

	/*
	 * Disable power gating by programming the performance counter
	 * source to 20 (i.e. counts the reads and writes from/to the IOMMU
	 * Reserved Register [MMIO Offset 1FF8h], which are ignored and
	 * never get incremented during this init phase.
	 * (Note: The event is also deprecated.)
	 */
	val = 20;
	if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
		goto pc_false;

	/* Check if the performance counters can be written to */
	val = 0xabcd;
	for (retry = 5; retry; retry--) {
		if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
		    iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
		    val2)
			break;

		/* Wait about 20 msec for power gating to disable and retry. */
		msleep(20);
	}

	/* restore */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
	    iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
		goto pc_false;

	if (val != val2)
		goto pc_false;

	pci_info(pdev, "IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
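
	/*
	 * Worked example (illustrative): a config value of 0x2200 would
	 * yield max_banks = (0x2200 >> 12) & 0x3f = 2 and max_counters =
	 * (0x2200 >> 7) & 0xf = 4 - both plain bitfield extracts.
	 */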
	return;

pc_false:
	pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
	amd_iommu_pc_present = false;
	return;
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};

/*
 * Note: IVHD 0x11 and 0x40 also contain an exact copy
 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
 */
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
	u64 features;

	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
		return;

	/* read extended feature bits */
	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

	if (!iommu->features) {
		iommu->features = features;
		return;
	}

	/*
	 * Sanity check and warn if EFR values from
	 * IVHD and MMIO conflict.
	 */
	if (features != iommu->features)
		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
			features, iommu->features);
}

static int __init iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	late_iommu_features_init(iommu);

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
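
		/*
		 * Worked example (illustrative): a PASmax field of 15 gives
		 * max_pasid = (1 << 16) - 1 = 65535, i.e. 16-bit PASIDs.
		 */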
		glxval = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
	iommu_device_register(&iommu->iommu);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		struct pci_dev *pdev = iommu->dev;
		int i;

		pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pci_info(pdev, "Extended features (%#llx):",
				 iommu->features);
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("Virtual APIC enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("X2APIC enabled\n");
	}
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;

		/* Need to set up the range after PCI init */
		iommu_set_cwwb_range(iommu);
	}

	/*
	 * Order is important here to make sure any unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the amd_iommu_init_api() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (!ret)
		print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	return 0;
}

			dest_mode_logical : 1,
} __attribute__ ((packed));

/*
 * There isn't really any need to mask/unmask at the irqchip level because
 * the 64-bit INTCAPXT registers can be updated atomically without tearing
 * when the affinity is being updated.
 */
static void intcapxt_unmask_irq(struct irq_data *data)
{
}

static void intcapxt_mask_irq(struct irq_data *data)
{
}

static struct irq_chip intcapxt_controller;

static int intcapxt_irqdomain_activate(struct irq_domain *domain,
				       struct irq_data *irqd, bool reserve)
{
	struct amd_iommu *iommu = irqd->chip_data;
	struct irq_cfg *cfg = irqd_cfg(irqd);
	union intcapxt xt;

	xt.capxt = 0ULL;
	xt.dest_mode_logical = apic->dest_mode_logical;
	xt.vector = cfg->vector;
	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
	xt.destid_24_31 = cfg->dest_apicid >> 24;
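
	/*
	 * Illustrative sketch: an x2APIC destination id of 0x12345678 is
	 * split into destid_0_23 = 0x345678 and destid_24_31 = 0x12, which
	 * the packed union lays out into the single 64-bit INTCAPXT value
	 * written below.
	 */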
	/*
	 * Current IOMMU implementation uses the same IRQ for all
	 * 3 IOMMU interrupts.
	 */
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
	return 0;
}

static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
					  struct irq_data *irqd)
{
	intcapxt_mask_irq(irqd);
}

static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	int i, ret;

	if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	for (i = virq; i < virq + nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);

		irqd->chip = &intcapxt_controller;
		irqd->chip_data = info->data;
		__irq_set_handler(i, handle_edge_irq, 0, "edge");
	}

	return 0;
}

static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs)
{
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static int intcapxt_set_affinity(struct irq_data *irqd,
				 const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irqd->parent_data;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	return intcapxt_irqdomain_activate(irqd->domain, irqd, false);
}

static struct irq_chip intcapxt_controller = {
	.name			= "IOMMU-MSI",
	.irq_unmask		= intcapxt_unmask_irq,
	.irq_mask		= intcapxt_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_affinity	= intcapxt_set_affinity,
	.flags			= IRQCHIP_SKIP_SET_WAKE,
};

static const struct irq_domain_ops intcapxt_domain_ops = {
	.alloc		= intcapxt_irqdomain_alloc,
	.free		= intcapxt_irqdomain_free,
	.activate	= intcapxt_irqdomain_activate,
	.deactivate	= intcapxt_irqdomain_deactivate,
};

static struct irq_domain *iommu_irqdomain;

static struct irq_domain *iommu_get_irqdomain(void)
{
	struct fwnode_handle *fn;

	/* No need for locking here (yet) as the init is single-threaded */
	if (iommu_irqdomain)
		return iommu_irqdomain;

	fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
	if (!fn)
		return NULL;

	iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
						      fn, &intcapxt_domain_ops,
						      NULL);
	if (!iommu_irqdomain)
		irq_domain_free_fwnode(fn);

	return iommu_irqdomain;
}

static int iommu_setup_intcapxt(struct amd_iommu *iommu)
{
	struct irq_domain *domain;
	struct irq_alloc_info info;
	int irq, ret;

	domain = iommu_get_irqdomain();
	if (!domain)
		return -ENXIO;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
	info.data = iommu;

	irq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
	if (irq < 0) {
		irq_domain_remove(domain);
		return irq;
	}

	ret = request_threaded_irq(irq, amd_iommu_int_handler,
				   amd_iommu_int_thread, 0, "AMD-Vi", iommu);
	if (ret) {
		irq_domain_free_irqs(irq, 1);
		irq_domain_remove(domain);
		return ret;
	}

	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
	return 0;
}

static int iommu_init_irq(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		ret = iommu_setup_intcapxt(iommu);
	else if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

	iommu->int_enabled = true;
enable_faults:

	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);

	iommu_ga_log_enable(iommu);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;
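
	/*
	 * Worked example (illustrative): flags of IVMD_FLAG_IR | IVMD_FLAG_IW
	 * (0x02 | 0x04 = 0x06) give prot = 0x3, i.e. read+write, which is
	 * what the unity-mapping code consumes later (an assumption about
	 * the IOMMU_PROT_IR/IOMMU_PROT_IW encoding).
	 */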
	/*
	 * Treat per-device exclusion ranges as r/w unity-mapped regions
	 * since some buggy BIOSes might otherwise overwrite the exclusion
	 * range (the exclusion_start and exclusion_length members) when
	 * there are multiple exclusion ranges (IVMD entries) defined in
	 * the ACPI table.
	 */
	if (m->flags & IVMD_FLAG_EXCL_RANGE)
		e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		fallthrough;
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}

static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable_xt(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to
 * copy the old content of the device table entries. If that is not the
 * case, or the copy failed, just continue as a normal kernel would.
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	if (!copy_device_table()) {
		/*
		 * If we get here because copying the device table from the
		 * old kernel (with all IOMMUs enabled) failed, print an
		 * error message and try to free the allocated
		 * old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}
static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}
static void enable_iommus(void)
{
	early_enable_iommus();
	enable_iommus_v2();
}
static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}
/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}
static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}
static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};
static void __init free_iommu_resources(void)
{
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();
}
/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
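/*
 * Illustrative note (added commentary): PCI_DEVFN(0x14, 0) packs device
 * 0x14, function 0 as (0x14 << 3) | 0x0 = 0xa0, so with bus 0x00 the SB
 * IOAPIC device id resolves to 0x00a0, i.e. PCI address 00:14.0.
 */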
static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret           = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
			       fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret           = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("Disabling interrupt remapping\n");

	return ret;
}
static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}
static void __init ivinfo_init(void *ivrs)
{
	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
}
/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for the AMD IOMMU (IVRS)
 * four times:
 *
 *	Pass 1) Discover the most comprehensive IVHD type to use.
 *
 *	Pass 2) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures that
 *		need to be allocated is determined.
 *
 *	Pass 3) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs.
 *
 *	Pass 4) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
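/*
 * Illustrative mapping (added commentary): in early_amd_iommu_init()
 * below, pass 1 corresponds to get_highest_supported_ivhd_type(), pass 2
 * to find_last_devid_acpi(), pass 3 to init_iommu_all() and pass 4 to
 * init_memory_definitions().
 */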
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	int i, remap_cache_sz, ret;
	acpi_status status;
	u32 pci_id;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	ivinfo_init(ivrs_base);

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
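	/*
	 * Illustrative note (added commentary): the table sizes scale with
	 * amd_iommu_last_bdf; e.g. assuming the maximum of 0x10000 device
	 * ids and 32-byte device table entries, dev_table_size works out
	 * to 64K * 32 = 2 MiB.
	 */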
	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
						  get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO,
				      get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;
	/* Disable IOMMU if there's Stoney Ridge graphics */
	for (i = 0; i < 32; i++) {
		pci_id = read_pci_config(0, i, 0, 0);
		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
			pr_info("Disable IOMMU on Stoney Ridge\n");
			amd_iommu_disabled = true;
			break;
		}
	}

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							DTE_INTTAB_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);

	return ret;
}
static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_irq(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}
static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}
/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
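/*
 * Illustrative sketch (added commentary): callers request a target state,
 * e.g. amd_iommu_detect() calls iommu_go_to_state(IOMMU_IVRS_DETECTED)
 * and amd_iommu_init() calls iommu_go_to_state(IOMMU_INITIALIZED). On the
 * success path each state_next() call advances one step:
 *
 *	IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED
 *	  -> IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN
 *	  -> IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and IOMMU_CMDLINE_DISABLED are
 * terminal error states; iommu_go_to_state() stops when it reaches one.
 */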
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state	= IOMMU_NOT_FOUND;
			ret		= -ENODEV;
		} else {
			init_state	= IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD IOMMU disabled\n");
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}
static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND         ||
		    init_state == IOMMU_INIT_ERROR        ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;

	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}
static bool amd_iommu_sme_check(void)
{
	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("IOMMU not currently supported when SME is active\n");

	return false;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/

int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}
static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}
static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}
static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_ioapic_map_size++;
	early_ioapic_map[i].id		= id;
	early_ioapic_map[i].devid	= devid;
	early_ioapic_map[i].cmd_line	= true;

	return 1;
}
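/*
 * Illustrative usage (added commentary): booting with
 * "ivrs_ioapic[9]=00:14.0" overrides the IVRS-provided device id of
 * IOAPIC id 9 to 00:14.0; the packing above turns that into devid
 * (0x00 << 8) | (0x14 << 3) | 0x0 = 0xa0.
 */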
static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_hpet_map_size++;
	early_hpet_map[i].id		= id;
	early_hpet_map[i].devid		= devid;
	early_hpet_map[i].cmd_line	= true;

	return 1;
}
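/*
 * Illustrative usage (added commentary): "ivrs_hpet[0]=00:14.0" maps
 * HPET id 0 to PCI device 00:14.0, using the same devid packing as the
 * IOAPIC override above.
 */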
static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("Invalid command line: hid or uid\n");
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line	= true;

	return 1;
}
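/*
 * Illustrative usage (added commentary): "ivrs_acpihid[00:14.5]=AMD0020:0"
 * associates HID "AMD0020", UID "0" with PCI device 00:14.5; the string
 * after '=' is split at the ':' into the hid and uid copied above.
 */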
3246 __setup("amd_iommu_dump", parse_amd_iommu_dump);
3247 __setup("amd_iommu=", parse_amd_iommu_options);
3248 __setup("amd_iommu_intr=", parse_amd_iommu_intr);
3249 __setup("ivrs_ioapic", parse_ivrs_ioapic);
3250 __setup("ivrs_hpet", parse_ivrs_hpet);
3251 __setup("ivrs_acpihid", parse_ivrs_acpihid);
3253 IOMMU_INIT_FINISH(amd_iommu_detect,
3254 gart_iommu_hole_init,
bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/
u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);
u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
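	/*
	 * Illustrative note (added commentary): bank 0, counter 0,
	 * function 0 yields offset (0x40 << 12) = 0x40000, i.e. the first
	 * counter register in the MMIO aperture checked below.
	 */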
	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}
int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);