1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 * Leo Duran <leo.duran@amd.com>
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
9 #define dev_fmt(fmt) pr_fmt(fmt)
11 #include <linux/ratelimit.h>
12 #include <linux/pci.h>
13 #include <linux/acpi.h>
14 #include <linux/pci-ats.h>
15 #include <linux/bitmap.h>
16 #include <linux/slab.h>
17 #include <linux/debugfs.h>
18 #include <linux/scatterlist.h>
19 #include <linux/dma-map-ops.h>
20 #include <linux/dma-direct.h>
21 #include <linux/iommu-helper.h>
22 #include <linux/delay.h>
23 #include <linux/amd-iommu.h>
24 #include <linux/notifier.h>
25 #include <linux/export.h>
26 #include <linux/irq.h>
27 #include <linux/msi.h>
28 #include <linux/irqdomain.h>
29 #include <linux/percpu.h>
30 #include <linux/io-pgtable.h>
31 #include <linux/cc_platform.h>
32 #include <asm/irq_remapping.h>
33 #include <asm/io_apic.h>
35 #include <asm/hw_irq.h>
36 #include <asm/proto.h>
37 #include <asm/iommu.h>
40 #include <uapi/linux/iommufd.h>
42 #include "amd_iommu.h"
43 #include "../dma-iommu.h"
44 #include "../irq_remapping.h"
46 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
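/*
 * Example (assuming the opcode encoding from amd_iommu_types.h, e.g.
 * CMD_INV_IOMMU_PAGES == 0x3): CMD_SET_TYPE() ORs the 4-bit opcode into
 * bits 31:28 of data[1], so CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES)
 * results in data[1] |= 0x30000000.
 */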
48 /* IO virtual address start page frame number */
49 #define IOVA_START_PFN (1)
50 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
52 /* Reserved IOVA ranges */
53 #define MSI_RANGE_START (0xfee00000)
54 #define MSI_RANGE_END (0xfeefffff)
55 #define HT_RANGE_START (0xfd00000000ULL)
56 #define HT_RANGE_END (0xffffffffffULL)
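/*
 * These ranges are published to the IOMMU core as reserved regions by
 * amd_iommu_get_resv_regions() further down, roughly as:
 *
 *	region = iommu_alloc_resv_region(MSI_RANGE_START,
 *					 MSI_RANGE_END - MSI_RANGE_START + 1,
 *					 0, IOMMU_RESV_MSI, GFP_KERNEL);
 */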
58 #define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL
60 static DEFINE_SPINLOCK(pd_bitmap_lock);
62 LIST_HEAD(ioapic_map);
64 LIST_HEAD(acpihid_map);
66 const struct iommu_ops amd_iommu_ops;
67 static const struct iommu_dirty_ops amd_dirty_ops;
69 int amd_iommu_max_glx_val = -1;
72 * general struct to manage commands sent to an IOMMU
78 struct kmem_cache *amd_iommu_irq_cache;
80 static void detach_device(struct device *dev);
82 /****************************************************************************
86 ****************************************************************************/
88 static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
90 return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
93 static inline int get_acpihid_device_id(struct device *dev,
94 struct acpihid_map_entry **entry)
96 struct acpi_device *adev = ACPI_COMPANION(dev);
97 struct acpihid_map_entry *p;
102 list_for_each_entry(p, &acpihid_map, list) {
103 if (acpi_dev_hid_uid_match(adev, p->hid,
104 p->uid[0] ? p->uid : NULL)) {
113 static inline int get_device_sbdf_id(struct device *dev)
118 sbdf = get_pci_sbdf_id(to_pci_dev(dev));
120 sbdf = get_acpihid_device_id(dev, NULL);
125 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
127 struct dev_table_entry *dev_table;
128 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
130 BUG_ON(pci_seg == NULL);
131 dev_table = pci_seg->dev_table;
132 BUG_ON(dev_table == NULL);
137 static inline u16 get_device_segment(struct device *dev)
141 if (dev_is_pci(dev)) {
142 struct pci_dev *pdev = to_pci_dev(dev);
144 seg = pci_domain_nr(pdev->bus);
146 u32 devid = get_acpihid_device_id(dev, NULL);
148 seg = PCI_SBDF_TO_SEGID(devid);
154 /* Writes the specific IOMMU for a device into the PCI segment rlookup table */
155 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
157 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
159 pci_seg->rlookup_table[devid] = iommu;
162 static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
164 struct amd_iommu_pci_seg *pci_seg;
166 for_each_pci_segment(pci_seg) {
167 if (pci_seg->id == seg)
168 return pci_seg->rlookup_table[devid];
173 static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
175 u16 seg = get_device_segment(dev);
176 int devid = get_device_sbdf_id(dev);
180 return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
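/*
 * Worked example (assuming get_pci_sbdf_id() packs the PCI segment into
 * bits 31:16 and the 16-bit device ID into bits 15:0): for device
 * 0001:02:03.4,
 *
 *	sbdf                    = 0x0001021c
 *	PCI_SBDF_TO_SEGID(sbdf) = 0x0001
 *	PCI_SBDF_TO_DEVID(sbdf) = 0x021c	(bus 0x02, devfn 0x1c)
 */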
183 static struct protection_domain *to_pdomain(struct iommu_domain *dom)
185 return container_of(dom, struct protection_domain, domain);
188 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
190 struct iommu_dev_data *dev_data;
191 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
193 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
197 spin_lock_init(&dev_data->lock);
198 dev_data->devid = devid;
199 ratelimit_default_init(&dev_data->rs);
201 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
205 static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
207 struct iommu_dev_data *dev_data;
208 struct llist_node *node;
209 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
211 if (llist_empty(&pci_seg->dev_data_list))
214 node = pci_seg->dev_data_list.first;
215 llist_for_each_entry(dev_data, node, dev_data_list) {
216 if (dev_data->devid == devid)
223 static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
225 struct amd_iommu *iommu;
226 struct dev_table_entry *dev_table;
227 u16 devid = pci_dev_id(pdev);
232 iommu = rlookup_amd_iommu(&pdev->dev);
236 amd_iommu_set_rlookup_table(iommu, alias);
237 dev_table = get_dev_table(iommu);
238 memcpy(dev_table[alias].data,
239 dev_table[devid].data,
240 sizeof(dev_table[alias].data));
245 static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
247 struct pci_dev *pdev;
249 if (!dev_is_pci(dev))
251 pdev = to_pci_dev(dev);
254 * The IVRS alias stored in the alias table may not be
255 * part of the PCI DMA aliases if its bus differs
256 * from the original device.
258 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
260 pci_for_each_dma_alias(pdev, clone_alias, NULL);
263 static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
265 struct pci_dev *pdev = to_pci_dev(dev);
266 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
269 /* For ACPI HID devices, there are no aliases */
270 if (!dev_is_pci(dev))
274 * Add the IVRS alias to the pci aliases if it is on the same
275 * bus. The IVRS table may know about a quirk that we don't.
277 ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
278 if (ivrs_alias != pci_dev_id(pdev) &&
279 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
280 pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
282 clone_aliases(iommu, dev);
285 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
287 struct iommu_dev_data *dev_data;
289 dev_data = search_dev_data(iommu, devid);
291 if (dev_data == NULL) {
292 dev_data = alloc_dev_data(iommu, devid);
296 if (translation_pre_enabled(iommu))
297 dev_data->defer_attach = true;
304 * Find or create an IOMMU group for an acpihid device.
306 static struct iommu_group *acpihid_device_group(struct device *dev)
308 struct acpihid_map_entry *p, *entry = NULL;
311 devid = get_acpihid_device_id(dev, &entry);
313 return ERR_PTR(devid);
315 list_for_each_entry(p, &acpihid_map, list) {
316 if ((devid == p->devid) && p->group)
317 entry->group = p->group;
321 entry->group = generic_device_group(dev);
323 iommu_group_ref_get(entry->group);
328 static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
330 return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
333 static u32 pdev_get_caps(struct pci_dev *pdev)
338 if (pci_ats_supported(pdev))
339 flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
341 if (pci_pri_supported(pdev))
342 flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
344 features = pci_pasid_features(pdev);
346 flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
348 if (features & PCI_PASID_CAP_EXEC)
349 flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
351 if (features & PCI_PASID_CAP_PRIV)
352 flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
358 static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
360 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
363 if (dev_data->ats_enabled)
366 if (amd_iommu_iotlb_sup &&
367 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
368 ret = pci_enable_ats(pdev, PAGE_SHIFT);
370 dev_data->ats_enabled = 1;
371 dev_data->ats_qdep = pci_ats_queue_depth(pdev);
378 static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
380 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
382 if (dev_data->ats_enabled) {
383 pci_disable_ats(pdev);
384 dev_data->ats_enabled = 0;
388 int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
390 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
393 if (dev_data->pri_enabled)
396 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
398 * First reset the PRI state of the device.
399 * FIXME: Hardcode number of outstanding requests for now
401 if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
402 dev_data->pri_enabled = 1;
403 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
412 void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev)
414 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
416 if (dev_data->pri_enabled) {
417 pci_disable_pri(pdev);
418 dev_data->pri_enabled = 0;
422 static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
424 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
427 if (dev_data->pasid_enabled)
430 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
431 /* Only allow access to user-accessible pages */
432 ret = pci_enable_pasid(pdev, 0);
434 dev_data->pasid_enabled = 1;
440 static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
442 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
444 if (dev_data->pasid_enabled) {
445 pci_disable_pasid(pdev);
446 dev_data->pasid_enabled = 0;
450 static void pdev_enable_caps(struct pci_dev *pdev)
452 pdev_enable_cap_ats(pdev);
453 pdev_enable_cap_pasid(pdev);
454 amd_iommu_pdev_enable_cap_pri(pdev);
458 static void pdev_disable_caps(struct pci_dev *pdev)
460 pdev_disable_cap_ats(pdev);
461 pdev_disable_cap_pasid(pdev);
462 amd_iommu_pdev_disable_cap_pri(pdev);
466 * This function checks if the driver got a valid device from the caller to
467 * avoid dereferencing invalid pointers.
469 static bool check_device(struct device *dev)
471 struct amd_iommu_pci_seg *pci_seg;
472 struct amd_iommu *iommu;
478 sbdf = get_device_sbdf_id(dev);
481 devid = PCI_SBDF_TO_DEVID(sbdf);
483 iommu = rlookup_amd_iommu(dev);
487 /* Out of our scope? */
488 pci_seg = iommu->pci_seg;
489 if (devid > pci_seg->last_bdf)
495 static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
497 struct iommu_dev_data *dev_data;
500 if (dev_iommu_priv_get(dev))
503 sbdf = get_device_sbdf_id(dev);
507 devid = PCI_SBDF_TO_DEVID(sbdf);
508 dev_data = find_dev_data(iommu, devid);
513 setup_aliases(iommu, dev);
516 * By default we use passthrough mode for IOMMUv2 capable devices.
517 * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
518 * invalid address), we ignore the capability for the device so
519 * it'll be forced to go into translation mode.
521 if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
522 dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) {
523 dev_data->flags = pdev_get_caps(to_pci_dev(dev));
526 dev_iommu_priv_set(dev, dev_data);
531 static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
533 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
534 struct dev_table_entry *dev_table = get_dev_table(iommu);
537 sbdf = get_device_sbdf_id(dev);
541 devid = PCI_SBDF_TO_DEVID(sbdf);
542 pci_seg->rlookup_table[devid] = NULL;
543 memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
545 setup_aliases(iommu, dev);
548 static void amd_iommu_uninit_device(struct device *dev)
550 struct iommu_dev_data *dev_data;
552 dev_data = dev_iommu_priv_get(dev);
556 if (dev_data->domain)
560 * We keep dev_data around for unplugged devices and reuse it when the
561 * device is re-plugged - not doing so would introduce a ton of races.
565 /****************************************************************************
567 * Interrupt handling functions
569 ****************************************************************************/
571 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
574 struct dev_table_entry *dev_table = get_dev_table(iommu);
576 for (i = 0; i < 4; ++i)
577 pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
580 static void dump_command(unsigned long phys_addr)
582 struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
585 for (i = 0; i < 4; ++i)
586 pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
589 static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
591 struct iommu_dev_data *dev_data = NULL;
592 int devid, vmg_tag, flags;
593 struct pci_dev *pdev;
596 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
597 vmg_tag = (event[1]) & 0xFFFF;
598 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
599 spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
601 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
604 dev_data = dev_iommu_priv_get(&pdev->dev);
607 if (__ratelimit(&dev_data->rs)) {
608 pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
609 vmg_tag, spa, flags);
612 pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
613 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
614 vmg_tag, spa, flags);
621 static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
623 struct iommu_dev_data *dev_data = NULL;
624 int devid, flags_rmp, vmg_tag, flags;
625 struct pci_dev *pdev;
628 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
629 flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
630 vmg_tag = (event[1]) & 0xFFFF;
631 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
632 gpa = ((u64)event[3] << 32) | event[2];
634 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
637 dev_data = dev_iommu_priv_get(&pdev->dev);
640 if (__ratelimit(&dev_data->rs)) {
641 pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
642 vmg_tag, gpa, flags_rmp, flags);
645 pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
646 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
647 vmg_tag, gpa, flags_rmp, flags);
654 #define IS_IOMMU_MEM_TRANSACTION(flags) \
655 (((flags) & EVENT_FLAG_I) == 0)
657 #define IS_WRITE_REQUEST(flags) \
658 ((flags) & EVENT_FLAG_RW)
660 static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
661 u16 devid, u16 domain_id,
662 u64 address, int flags)
664 struct iommu_dev_data *dev_data = NULL;
665 struct pci_dev *pdev;
667 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
670 dev_data = dev_iommu_priv_get(&pdev->dev);
674 * If this is a DMA fault (for which the I(nterrupt)
675 * bit will be unset), allow report_iommu_fault() to
676 * prevent logging it.
678 if (IS_IOMMU_MEM_TRANSACTION(flags)) {
679 /* Device not attached to domain properly */
680 if (dev_data->domain == NULL) {
681 pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
682 pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n",
683 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
684 PCI_FUNC(devid), domain_id);
688 if (!report_iommu_fault(&dev_data->domain->domain,
690 IS_WRITE_REQUEST(flags) ?
696 if (__ratelimit(&dev_data->rs)) {
697 pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
698 domain_id, address, flags);
701 pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
702 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
703 domain_id, address, flags);
711 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
713 struct device *dev = iommu->iommu.dev;
714 int type, devid, flags, tag;
715 volatile u32 *event = __evt;
721 type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
722 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
723 pasid = (event[0] & EVENT_DOMID_MASK_HI) |
724 (event[1] & EVENT_DOMID_MASK_LO);
725 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
726 address = (u64)(((u64)event[3]) << 32) | event[2];
729 /* Did we hit the erratum? */
730 if (++count == LOOP_TIMEOUT) {
731 pr_err("No event written to event log\n");
738 if (type == EVENT_TYPE_IO_FAULT) {
739 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
744 case EVENT_TYPE_ILL_DEV:
745 dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
746 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
747 pasid, address, flags);
748 dump_dte_entry(iommu, devid);
750 case EVENT_TYPE_DEV_TAB_ERR:
751 dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
752 "address=0x%llx flags=0x%04x]\n",
753 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
756 case EVENT_TYPE_PAGE_TAB_ERR:
757 dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
758 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
759 pasid, address, flags);
761 case EVENT_TYPE_ILL_CMD:
762 dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
763 dump_command(address);
765 case EVENT_TYPE_CMD_HARD_ERR:
766 dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
769 case EVENT_TYPE_IOTLB_INV_TO:
770 dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
771 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
774 case EVENT_TYPE_INV_DEV_REQ:
775 dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
776 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
777 pasid, address, flags);
779 case EVENT_TYPE_RMP_FAULT:
780 amd_iommu_report_rmp_fault(iommu, event);
782 case EVENT_TYPE_RMP_HW_ERR:
783 amd_iommu_report_rmp_hw_error(iommu, event);
785 case EVENT_TYPE_INV_PPR_REQ:
786 pasid = PPR_PASID(*((u64 *)__evt));
787 tag = event[1] & 0x03FF;
788 dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
789 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
790 pasid, address, flags, tag);
793 dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
794 event[0], event[1], event[2], event[3]);
798 * To detect hardware erratum 732 we need to clear the
799 * entry back to zero. This issue does not exist on
800 * SNP-enabled systems, where this buffer is also not
801 * writable.
803 if (!amd_iommu_snp_en)
804 memset(__evt, 0, 4 * sizeof(u32));
807 static void iommu_poll_events(struct amd_iommu *iommu)
811 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
812 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
814 while (head != tail) {
815 iommu_print_event(iommu, iommu->evt_buf + head);
816 head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
819 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
822 static void iommu_poll_ppr_log(struct amd_iommu *iommu)
826 if (iommu->ppr_log == NULL)
829 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
830 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
832 while (head != tail) {
837 raw = (u64 *)(iommu->ppr_log + head);
840 * Hardware bug: Interrupt may arrive before the entry is
841 * written to memory. If this happens we need to wait for the
842 * entry to arrive.
843 */
844 for (i = 0; i < LOOP_TIMEOUT; ++i) {
845 if (PPR_REQ_TYPE(raw[0]) != 0)
850 /* Avoid memcpy function-call overhead */
855 * To detect hardware erratum 733 we need to clear the
856 * entry back to zero. This issue does not exist on
857 * SNP-enabled systems, where this buffer is also not
858 * writable.
860 if (!amd_iommu_snp_en)
861 raw[0] = raw[1] = 0UL;
863 /* Update head pointer of hardware ring-buffer */
864 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
865 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
867 /* TODO: PPR Handler will be added when we add IOPF support */
869 /* Refresh ring-buffer information */
870 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
871 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
875 #ifdef CONFIG_IRQ_REMAP
876 static int (*iommu_ga_log_notifier)(u32);
878 int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
880 iommu_ga_log_notifier = notifier;
884 EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
886 static void iommu_poll_ga_log(struct amd_iommu *iommu)
890 if (iommu->ga_log == NULL)
893 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
894 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
896 while (head != tail) {
900 raw = (u64 *)(iommu->ga_log + head);
902 /* Avoid memcpy function-call overhead */
905 /* Update head pointer of hardware ring-buffer */
906 head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
907 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
909 /* Handle GA entry */
910 switch (GA_REQ_TYPE(log_entry)) {
912 if (!iommu_ga_log_notifier)
915 pr_debug("%s: devid=%#x, ga_tag=%#x\n",
916 __func__, GA_DEVID(log_entry),
919 if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
920 pr_err("GA log notifier failed.\n");
929 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
931 if (!irq_remapping_enabled || !dev_is_pci(dev) ||
932 !pci_dev_has_default_msi_parent_domain(to_pci_dev(dev)))
935 dev_set_msi_domain(dev, iommu->ir_domain);
938 #else /* CONFIG_IRQ_REMAP */
940 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
941 #endif /* !CONFIG_IRQ_REMAP */
943 static void amd_iommu_handle_irq(void *data, const char *evt_type,
944 u32 int_mask, u32 overflow_mask,
945 void (*int_handler)(struct amd_iommu *),
946 void (*overflow_handler)(struct amd_iommu *))
948 struct amd_iommu *iommu = (struct amd_iommu *) data;
949 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
950 u32 mask = int_mask | overflow_mask;
952 while (status & mask) {
953 /* Enable interrupt sources again */
954 writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
957 pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
958 iommu->index, evt_type);
962 if ((status & overflow_mask) && overflow_handler)
963 overflow_handler(iommu);
966 * Hardware bug: ERBT1312
967 * When re-enabling interrupt (by writing 1
968 * to clear the bit), the hardware might also try to set
969 * the interrupt bit in the event status register.
970 * In this scenario, the bit will be set, disabling
971 * subsequent interrupts.
973 * Workaround: The IOMMU driver should read back the
974 * status register and check if the interrupt bits are cleared.
975 * If not, the driver needs to go through the interrupt handler
976 * again and re-clear the bits.
978 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
982 irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
984 amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
985 MMIO_STATUS_EVT_OVERFLOW_MASK,
986 iommu_poll_events, amd_iommu_restart_event_logging);
991 irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
993 amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
994 MMIO_STATUS_PPR_OVERFLOW_MASK,
995 iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
1000 irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
1002 #ifdef CONFIG_IRQ_REMAP
1003 amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
1004 MMIO_STATUS_GALOG_OVERFLOW_MASK,
1005 iommu_poll_ga_log, amd_iommu_restart_ga_log);
1011 irqreturn_t amd_iommu_int_thread(int irq, void *data)
1013 amd_iommu_int_thread_evtlog(irq, data);
1014 amd_iommu_int_thread_pprlog(irq, data);
1015 amd_iommu_int_thread_galog(irq, data);
1020 irqreturn_t amd_iommu_int_handler(int irq, void *data)
1022 return IRQ_WAKE_THREAD;
1025 /****************************************************************************
1027 * IOMMU command queuing functions
1029 ****************************************************************************/
1031 static int wait_on_sem(struct amd_iommu *iommu, u64 data)
1035 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
1040 if (i == LOOP_TIMEOUT) {
1041 pr_alert("Completion-Wait loop timed out\n");
1048 static void copy_cmd_to_buffer(struct amd_iommu *iommu,
1049 struct iommu_cmd *cmd)
1054 /* Copy command to buffer */
1055 tail = iommu->cmd_buf_tail;
1056 target = iommu->cmd_buf + tail;
1057 memcpy(target, cmd, sizeof(*cmd));
1059 tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1060 iommu->cmd_buf_tail = tail;
1062 /* Tell the IOMMU about it */
1063 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
1066 static void build_completion_wait(struct iommu_cmd *cmd,
1067 struct amd_iommu *iommu,
1070 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
1072 memset(cmd, 0, sizeof(*cmd));
1073 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
1074 cmd->data[1] = upper_32_bits(paddr);
1075 cmd->data[2] = lower_32_bits(data);
1076 cmd->data[3] = upper_32_bits(data);
1077 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
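/*
 * Sketch of the resulting semantics: with iommu->cmd_sem at physical address
 * P and a sequence value of 5, this COMPLETION_WAIT command asks the IOMMU to
 * store the 64-bit value 5 to P once all previously queued commands have
 * completed; wait_on_sem() above then polls *cmd_sem for that value.
 */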
1080 static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
1082 memset(cmd, 0, sizeof(*cmd));
1083 cmd->data[0] = devid;
1084 CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
1088 * Builds an invalidation address which is suitable for one page or multiple
1089 * pages. Sets the size bit (S) if more than one page is flushed.
1091 static inline u64 build_inv_address(u64 address, size_t size)
1093 u64 pages, end, msb_diff;
1095 pages = iommu_num_pages(address, size, PAGE_SIZE);
1098 return address & PAGE_MASK;
1100 end = address + size - 1;
1103 * msb_diff would hold the index of the most significant bit that
1104 * flipped between the start and end.
1106 msb_diff = fls64(end ^ address) - 1;
1109 * Bits 63:52 are sign extended. If for some reason bit 51 is different
1110 * between the start and the end, invalidate everything.
1112 if (unlikely(msb_diff > 51)) {
1113 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
1116 * The msb-bit must be clear on the address. Just set all the
1117 * lower bits.
1118 */
1119 address |= (1ull << msb_diff) - 1;
1122 /* Clear bits 11:0 */
1123 address &= PAGE_MASK;
1125 /* Set the size bit - we flush more than one 4kb page */
1126 return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
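/*
 * Worked example: address = 0x101000, size = 0x3000 spans three pages, so
 * end = 0x103fff and msb_diff = fls64(0x101000 ^ 0x103fff) - 1 = 13. Setting
 * the lower bits and clearing bits 11:0 yields 0x101000 (bit 12 set, bit 13
 * clear); together with the size (S) bit this asks the IOMMU to invalidate
 * the naturally aligned 16K region starting at 0x100000.
 */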
1129 static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
1130 size_t size, u16 domid,
1131 ioasid_t pasid, bool gn)
1133 u64 inv_address = build_inv_address(address, size);
1135 memset(cmd, 0, sizeof(*cmd));
1137 cmd->data[1] |= domid;
1138 cmd->data[2] = lower_32_bits(inv_address);
1139 cmd->data[3] = upper_32_bits(inv_address);
1140 /* PDE bit - we want to flush everything, not only the PTEs */
1141 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
1143 cmd->data[0] |= pasid;
1144 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
1146 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
1149 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
1150 u64 address, size_t size,
1151 ioasid_t pasid, bool gn)
1153 u64 inv_address = build_inv_address(address, size);
1155 memset(cmd, 0, sizeof(*cmd));
1157 cmd->data[0] = devid;
1158 cmd->data[0] |= (qdep & 0xff) << 24;
1159 cmd->data[1] = devid;
1160 cmd->data[2] = lower_32_bits(inv_address);
1161 cmd->data[3] = upper_32_bits(inv_address);
1163 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
1164 cmd->data[1] |= (pasid & 0xff) << 16;
1165 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
1168 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
1171 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
1172 int status, int tag, u8 gn)
1174 memset(cmd, 0, sizeof(*cmd));
1176 cmd->data[0] = devid;
1178 cmd->data[1] = pasid;
1179 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
1181 cmd->data[3] = tag & 0x1ff;
1182 cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
1184 CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
1187 static void build_inv_all(struct iommu_cmd *cmd)
1189 memset(cmd, 0, sizeof(*cmd));
1190 CMD_SET_TYPE(cmd, CMD_INV_ALL);
1193 static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1195 memset(cmd, 0, sizeof(*cmd));
1196 cmd->data[0] = devid;
1197 CMD_SET_TYPE(cmd, CMD_INV_IRT);
1201 * Writes the command to the IOMMU's command buffer and informs the
1202 * hardware about the new command.
1204 static int __iommu_queue_command_sync(struct amd_iommu *iommu,
1205 struct iommu_cmd *cmd,
1208 unsigned int count = 0;
1209 u32 left, next_tail;
1211 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1213 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
1216 /* Skip udelay() the first time around */
1218 if (count == LOOP_TIMEOUT) {
1219 pr_err("Command buffer timeout\n");
1226 /* Update head and recheck remaining space */
1227 iommu->cmd_buf_head = readl(iommu->mmio_base +
1228 MMIO_CMD_HEAD_OFFSET);
1233 copy_cmd_to_buffer(iommu, cmd);
1235 /* Do we need to make sure all commands are processed? */
1236 iommu->need_sync = sync;
1241 static int iommu_queue_command_sync(struct amd_iommu *iommu,
1242 struct iommu_cmd *cmd,
1245 unsigned long flags;
1248 raw_spin_lock_irqsave(&iommu->lock, flags);
1249 ret = __iommu_queue_command_sync(iommu, cmd, sync);
1250 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1255 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1257 return iommu_queue_command_sync(iommu, cmd, true);
1261 * This function queues a completion wait command into the command
1262 * buffer of an IOMMU
1264 static int iommu_completion_wait(struct amd_iommu *iommu)
1266 struct iommu_cmd cmd;
1267 unsigned long flags;
1271 if (!iommu->need_sync)
1274 data = atomic64_add_return(1, &iommu->cmd_sem_val);
1275 build_completion_wait(&cmd, iommu, data);
1277 raw_spin_lock_irqsave(&iommu->lock, flags);
1279 ret = __iommu_queue_command_sync(iommu, &cmd, false);
1283 ret = wait_on_sem(iommu, data);
1286 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1291 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1293 struct iommu_cmd cmd;
1295 build_inv_dte(&cmd, devid);
1297 return iommu_queue_command(iommu, &cmd);
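/*
 * The flush helpers below combine these primitives in the usual pattern:
 * queue the invalidation command(s), then synchronize on a COMPLETION_WAIT,
 * e.g.
 *
 *	iommu_flush_dte(iommu, devid);
 *	iommu_completion_wait(iommu);
 */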
1300 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
1303 u16 last_bdf = iommu->pci_seg->last_bdf;
1305 for (devid = 0; devid <= last_bdf; ++devid)
1306 iommu_flush_dte(iommu, devid);
1308 iommu_completion_wait(iommu);
1312 * This function uses heavy locking and may disable irqs for some time. But
1313 * this is no issue because it is only called during resume.
1315 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1318 u16 last_bdf = iommu->pci_seg->last_bdf;
1320 for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
1321 struct iommu_cmd cmd;
1322 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1323 dom_id, IOMMU_NO_PASID, false);
1324 iommu_queue_command(iommu, &cmd);
1327 iommu_completion_wait(iommu);
1330 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1332 struct iommu_cmd cmd;
1334 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1335 dom_id, IOMMU_NO_PASID, false);
1336 iommu_queue_command(iommu, &cmd);
1338 iommu_completion_wait(iommu);
1341 static void amd_iommu_flush_all(struct amd_iommu *iommu)
1343 struct iommu_cmd cmd;
1345 build_inv_all(&cmd);
1347 iommu_queue_command(iommu, &cmd);
1348 iommu_completion_wait(iommu);
1351 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1353 struct iommu_cmd cmd;
1355 build_inv_irt(&cmd, devid);
1357 iommu_queue_command(iommu, &cmd);
1360 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
1363 u16 last_bdf = iommu->pci_seg->last_bdf;
1365 if (iommu->irtcachedis_enabled)
1368 for (devid = 0; devid <= last_bdf; devid++)
1369 iommu_flush_irt(iommu, devid);
1371 iommu_completion_wait(iommu);
1374 void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
1376 if (check_feature(FEATURE_IA)) {
1377 amd_iommu_flush_all(iommu);
1379 amd_iommu_flush_dte_all(iommu);
1380 amd_iommu_flush_irt_all(iommu);
1381 amd_iommu_flush_tlb_all(iommu);
1386 * Build and queue the command for flushing the on-device TLB
1388 static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
1389 size_t size, ioasid_t pasid, bool gn)
1391 struct amd_iommu *iommu;
1392 struct iommu_cmd cmd;
1395 qdep = dev_data->ats_qdep;
1396 iommu = rlookup_amd_iommu(dev_data->dev);
1400 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
1403 return iommu_queue_command(iommu, &cmd);
1406 static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
1408 struct amd_iommu *iommu = data;
1410 return iommu_flush_dte(iommu, alias);
1414 * Build and queue the command(s) for invalidating a device table entry
1416 static int device_flush_dte(struct iommu_dev_data *dev_data)
1418 struct amd_iommu *iommu;
1419 struct pci_dev *pdev = NULL;
1420 struct amd_iommu_pci_seg *pci_seg;
1424 iommu = rlookup_amd_iommu(dev_data->dev);
1428 if (dev_is_pci(dev_data->dev))
1429 pdev = to_pci_dev(dev_data->dev);
1432 ret = pci_for_each_dma_alias(pdev,
1433 device_flush_dte_alias, iommu);
1435 ret = iommu_flush_dte(iommu, dev_data->devid);
1439 pci_seg = iommu->pci_seg;
1440 alias = pci_seg->alias_table[dev_data->devid];
1441 if (alias != dev_data->devid) {
1442 ret = iommu_flush_dte(iommu, alias);
1447 if (dev_data->ats_enabled) {
1448 /* Invalidate the entire contents of an IOTLB */
1449 ret = device_flush_iotlb(dev_data, 0, ~0UL,
1450 IOMMU_NO_PASID, false);
1457 * TLB invalidation function which is called from the mapping functions.
1458 * It invalidates a single PTE if the range to flush is within a single
1459 * page. Otherwise it flushes the whole TLB of the IOMMU.
1461 static void __domain_flush_pages(struct protection_domain *domain,
1462 u64 address, size_t size)
1464 struct iommu_dev_data *dev_data;
1465 struct iommu_cmd cmd;
1467 ioasid_t pasid = IOMMU_NO_PASID;
1470 if (pdom_is_v2_pgtbl_mode(domain))
1473 build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
1475 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
1476 if (!domain->dev_iommu[i])
1480 * Devices of this domain are behind this IOMMU
1481 * We need a TLB flush
1483 ret |= iommu_queue_command(amd_iommus[i], &cmd);
1486 list_for_each_entry(dev_data, &domain->dev_list, list) {
1488 if (!dev_data->ats_enabled)
1491 ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
1497 void amd_iommu_domain_flush_pages(struct protection_domain *domain,
1498 u64 address, size_t size)
1500 if (likely(!amd_iommu_np_cache)) {
1501 __domain_flush_pages(domain, address, size);
1503 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
1504 amd_iommu_domain_flush_complete(domain);
1510 * When NpCache is on, we infer that we run in a VM and use a vIOMMU.
1511 * In such setups it is best to avoid flushes of ranges which are not
1512 * naturally aligned, since it would lead to flushes of unmodified
1513 * PTEs. Such flushes would require the hypervisor to do more work than
1514 * necessary. Therefore, perform repeated flushes of aligned ranges
1515 * until the whole range is covered. Each iteration flushes the smaller
1516 * of the natural alignment of the address that we flush and the
1517 * greatest naturally aligned region that fits in the range.
1520 int addr_alignment = __ffs(address);
1521 int size_alignment = __fls(size);
1526 * size is always non-zero, but address might be zero, causing
1527 * addr_alignment to be negative. As the casting of the
1528 * argument in __ffs(address) to long might trim the high bits
1529 * of the address on x86-32, cast to long when doing the check.
1531 if (likely((unsigned long)address != 0))
1532 min_alignment = min(addr_alignment, size_alignment);
1534 min_alignment = size_alignment;
1536 flush_size = 1ul << min_alignment;
1538 __domain_flush_pages(domain, address, flush_size);
1539 address += flush_size;
1543 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
1544 amd_iommu_domain_flush_complete(domain);
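/*
 * Worked example of the loop above on an np_cache system: flushing address
 * 0x3000 with size 0x5000 takes two iterations. The first sees
 * __ffs(0x3000) == 12 and __fls(0x5000) == 14 and flushes 0x1000 bytes at
 * 0x3000; the second sees __ffs(0x4000) == __fls(0x4000) == 14 and flushes
 * the remaining 0x4000 bytes at 0x4000 as one aligned range.
 */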
1547 /* Flush the whole IO/TLB for a given protection domain - including PDE */
1548 static void amd_iommu_domain_flush_all(struct protection_domain *domain)
1550 amd_iommu_domain_flush_pages(domain, 0,
1551 CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
1554 void amd_iommu_domain_flush_complete(struct protection_domain *domain)
1558 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
1559 if (domain && !domain->dev_iommu[i])
1563 * Devices of this domain are behind this IOMMU
1564 * We need to wait for completion of all commands.
1566 iommu_completion_wait(amd_iommus[i]);
1570 /* Flush the not-present cache if it exists */
1571 static void domain_flush_np_cache(struct protection_domain *domain,
1572 dma_addr_t iova, size_t size)
1574 if (unlikely(amd_iommu_np_cache)) {
1575 unsigned long flags;
1577 spin_lock_irqsave(&domain->lock, flags);
1578 amd_iommu_domain_flush_pages(domain, iova, size);
1579 spin_unlock_irqrestore(&domain->lock, flags);
1585 * This function flushes the DTEs for all devices in the domain
1587 static void domain_flush_devices(struct protection_domain *domain)
1589 struct iommu_dev_data *dev_data;
1591 list_for_each_entry(dev_data, &domain->dev_list, list)
1592 device_flush_dte(dev_data);
1595 /****************************************************************************
1597 * The next functions belong to the domain allocation. A domain is
1598 * allocated for every IOMMU as the default domain. If device isolation
1599 * is enabled, every device gets its own domain. The most important thing
1600 * about domains is the page table mapping the DMA address space they
1601 * serve.
1602 *
1603 ****************************************************************************/
1605 static u16 domain_id_alloc(void)
1609 spin_lock(&pd_bitmap_lock);
1610 id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1612 if (id > 0 && id < MAX_DOMAIN_ID)
1613 __set_bit(id, amd_iommu_pd_alloc_bitmap);
1616 spin_unlock(&pd_bitmap_lock);
1621 static void domain_id_free(int id)
1623 spin_lock(&pd_bitmap_lock);
1624 if (id > 0 && id < MAX_DOMAIN_ID)
1625 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
1626 spin_unlock(&pd_bitmap_lock);
1629 static void free_gcr3_tbl_level1(u64 *tbl)
1634 for (i = 0; i < 512; ++i) {
1635 if (!(tbl[i] & GCR3_VALID))
1638 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1640 free_page((unsigned long)ptr);
1644 static void free_gcr3_tbl_level2(u64 *tbl)
1649 for (i = 0; i < 512; ++i) {
1650 if (!(tbl[i] & GCR3_VALID))
1653 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1655 free_gcr3_tbl_level1(ptr);
1659 static void free_gcr3_table(struct protection_domain *domain)
1661 if (domain->glx == 2)
1662 free_gcr3_tbl_level2(domain->gcr3_tbl);
1663 else if (domain->glx == 1)
1664 free_gcr3_tbl_level1(domain->gcr3_tbl);
1666 BUG_ON(domain->glx != 0);
1668 free_page((unsigned long)domain->gcr3_tbl);
1672 * Number of GCR3 table levels required. Each level is a 4-Kbyte
1673 * page and can contain up to 512 entries.
1675 static int get_gcr3_levels(int pasids)
1680 return amd_iommu_max_glx_val;
1682 levels = get_count_order(pasids);
1684 return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
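/*
 * Example: a single PASID needs no extra levels (glx 0, one 512-entry
 * table), while 2^16 PASIDs give get_count_order() == 16 and therefore
 * DIV_ROUND_UP(16, 9) - 1 == 1 extra table level (glx 1).
 */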
1687 /* Note: This function expects iommu_domain->lock to be held prior to calling the function. */
1688 static int setup_gcr3_table(struct protection_domain *domain, int pasids)
1690 int levels = get_gcr3_levels(pasids);
1692 if (levels > amd_iommu_max_glx_val)
1695 domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
1696 if (domain->gcr3_tbl == NULL)
1699 domain->glx = levels;
1700 domain->flags |= PD_IOMMUV2_MASK;
1702 amd_iommu_domain_update(domain);
1707 static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
1708 struct protection_domain *domain, bool ats, bool ppr)
1713 struct dev_table_entry *dev_table = get_dev_table(iommu);
1715 if (domain->iop.mode != PAGE_MODE_NONE)
1716 pte_root = iommu_virt_to_phys(domain->iop.root);
1718 pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
1719 << DEV_ENTRY_MODE_SHIFT;
1721 pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
1724 * When SNP is enabled, only set the TV bit when IOMMU
1725 * page translation is in use.
1727 if (!amd_iommu_snp_en || (domain->id != 0))
1728 pte_root |= DTE_FLAG_TV;
1730 flags = dev_table[devid].data[1];
1733 flags |= DTE_FLAG_IOTLB;
1736 pte_root |= 1ULL << DEV_ENTRY_PPR;
1738 if (domain->dirty_tracking)
1739 pte_root |= DTE_FLAG_HAD;
1741 if (domain->flags & PD_IOMMUV2_MASK) {
1742 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
1743 u64 glx = domain->glx;
1746 pte_root |= DTE_FLAG_GV;
1747 pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1749 /* First mask out possible old values for GCR3 table */
1750 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1753 tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1756 /* Encode GCR3 table into DTE */
1757 tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1760 tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1763 tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1766 if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
1767 dev_table[devid].data[2] |=
1768 ((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
1771 if (domain->flags & PD_GIOV_MASK)
1772 pte_root |= DTE_FLAG_GIOV;
1775 flags &= ~DEV_DOMID_MASK;
1776 flags |= domain->id;
1778 old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
1779 dev_table[devid].data[1] = flags;
1780 dev_table[devid].data[0] = pte_root;
1783 * A kdump kernel might be replacing a domain ID that was copied from
1784 * the previous kernel--if so, it needs to flush the translation cache
1785 * entries for the old domain ID that is being overwritten
1788 amd_iommu_flush_tlb_domid(iommu, old_domid);
1792 static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
1794 struct dev_table_entry *dev_table = get_dev_table(iommu);
1796 /* remove entry from the device table seen by the hardware */
1797 dev_table[devid].data[0] = DTE_FLAG_V;
1799 if (!amd_iommu_snp_en)
1800 dev_table[devid].data[0] |= DTE_FLAG_TV;
1802 dev_table[devid].data[1] &= DTE_FLAG_MASK;
1804 amd_iommu_apply_erratum_63(iommu, devid);
1807 static void do_attach(struct iommu_dev_data *dev_data,
1808 struct protection_domain *domain)
1810 struct amd_iommu *iommu;
1813 iommu = rlookup_amd_iommu(dev_data->dev);
1816 ats = dev_data->ats_enabled;
1818 /* Update data structures */
1819 dev_data->domain = domain;
1820 list_add(&dev_data->list, &domain->dev_list);
1822 /* Update NUMA Node ID */
1823 if (domain->nid == NUMA_NO_NODE)
1824 domain->nid = dev_to_node(dev_data->dev);
1826 /* Do reference counting */
1827 domain->dev_iommu[iommu->index] += 1;
1828 domain->dev_cnt += 1;
1830 /* Update device table */
1831 set_dte_entry(iommu, dev_data->devid, domain,
1832 ats, dev_data->ppr);
1833 clone_aliases(iommu, dev_data->dev);
1835 device_flush_dte(dev_data);
1838 static void do_detach(struct iommu_dev_data *dev_data)
1840 struct protection_domain *domain = dev_data->domain;
1841 struct amd_iommu *iommu;
1843 iommu = rlookup_amd_iommu(dev_data->dev);
1847 /* Update data structures */
1848 dev_data->domain = NULL;
1849 list_del(&dev_data->list);
1850 clear_dte_entry(iommu, dev_data->devid);
1851 clone_aliases(iommu, dev_data->dev);
1853 /* Flush the DTE entry */
1854 device_flush_dte(dev_data);
1856 /* Flush IOTLB and wait for the flushes to finish */
1857 amd_iommu_domain_flush_all(domain);
1859 /* decrease reference counters - needs to happen after the flushes */
1860 domain->dev_iommu[iommu->index] -= 1;
1861 domain->dev_cnt -= 1;
1865 * If a device is not yet associated with a domain, this function makes the
1866 * device visible in the domain
1868 static int attach_device(struct device *dev,
1869 struct protection_domain *domain)
1871 struct iommu_dev_data *dev_data;
1872 unsigned long flags;
1875 spin_lock_irqsave(&domain->lock, flags);
1877 dev_data = dev_iommu_priv_get(dev);
1879 spin_lock(&dev_data->lock);
1881 if (dev_data->domain != NULL) {
1886 if (dev_is_pci(dev))
1887 pdev_enable_caps(to_pci_dev(dev));
1889 do_attach(dev_data, domain);
1892 spin_unlock(&dev_data->lock);
1894 spin_unlock_irqrestore(&domain->lock, flags);
1900 * Removes a device from a protection domain (with devtable_lock held)
1902 static void detach_device(struct device *dev)
1904 struct protection_domain *domain;
1905 struct iommu_dev_data *dev_data;
1906 unsigned long flags;
1908 dev_data = dev_iommu_priv_get(dev);
1909 domain = dev_data->domain;
1911 spin_lock_irqsave(&domain->lock, flags);
1913 spin_lock(&dev_data->lock);
1916 * First check if the device is still attached. It might already
1917 * be detached from its domain because the generic
1918 * iommu_detach_group code detached it and we try again here in
1919 * our alias handling.
1921 if (WARN_ON(!dev_data->domain))
1924 do_detach(dev_data);
1926 if (dev_is_pci(dev))
1927 pdev_disable_caps(to_pci_dev(dev));
1930 spin_unlock(&dev_data->lock);
1932 spin_unlock_irqrestore(&domain->lock, flags);
1935 static struct iommu_device *amd_iommu_probe_device(struct device *dev)
1937 struct iommu_device *iommu_dev;
1938 struct amd_iommu *iommu;
1941 if (!check_device(dev))
1942 return ERR_PTR(-ENODEV);
1944 iommu = rlookup_amd_iommu(dev);
1946 return ERR_PTR(-ENODEV);
1948 /* Not registered yet? */
1949 if (!iommu->iommu.ops)
1950 return ERR_PTR(-ENODEV);
1952 if (dev_iommu_priv_get(dev))
1953 return &iommu->iommu;
1955 ret = iommu_init_device(iommu, dev);
1957 if (ret != -ENOTSUPP)
1958 dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
1959 iommu_dev = ERR_PTR(ret);
1960 iommu_ignore_device(iommu, dev);
1962 amd_iommu_set_pci_msi_domain(dev, iommu);
1963 iommu_dev = &iommu->iommu;
1966 iommu_completion_wait(iommu);
1971 static void amd_iommu_probe_finalize(struct device *dev)
1973 /* Domains are initialized for this device - have a look at what we ended up with */
1974 set_dma_ops(dev, NULL);
1975 iommu_setup_dma_ops(dev, 0, U64_MAX);
1978 static void amd_iommu_release_device(struct device *dev)
1980 struct amd_iommu *iommu;
1982 if (!check_device(dev))
1985 iommu = rlookup_amd_iommu(dev);
1989 amd_iommu_uninit_device(dev);
1990 iommu_completion_wait(iommu);
1993 static struct iommu_group *amd_iommu_device_group(struct device *dev)
1995 if (dev_is_pci(dev))
1996 return pci_device_group(dev);
1998 return acpihid_device_group(dev);
2001 /*****************************************************************************
2003 * The next functions belong to the dma_ops mapping/unmapping code.
2005 *****************************************************************************/
2007 static void update_device_table(struct protection_domain *domain)
2009 struct iommu_dev_data *dev_data;
2011 list_for_each_entry(dev_data, &domain->dev_list, list) {
2012 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
2016 set_dte_entry(iommu, dev_data->devid, domain,
2017 dev_data->ats_enabled, dev_data->ppr);
2018 clone_aliases(iommu, dev_data->dev);
2022 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
2024 update_device_table(domain);
2025 domain_flush_devices(domain);
2028 void amd_iommu_domain_update(struct protection_domain *domain)
2030 /* Update device table */
2031 amd_iommu_update_and_flush_device_table(domain);
2033 /* Flush domain TLB(s) and wait for completion */
2034 amd_iommu_domain_flush_all(domain);
2037 /*****************************************************************************
2039 * The following functions belong to the exported interface of AMD IOMMU
2041 * This interface allows access to lower level functions of the IOMMU
2042 * like protection domain handling and assignment of devices to domains
2043 * which is not possible with the dma_ops interface.
2045 *****************************************************************************/
2047 static void cleanup_domain(struct protection_domain *domain)
2049 struct iommu_dev_data *entry;
2051 lockdep_assert_held(&domain->lock);
2053 if (!domain->dev_cnt)
2056 while (!list_empty(&domain->dev_list)) {
2057 entry = list_first_entry(&domain->dev_list,
2058 struct iommu_dev_data, list);
2059 BUG_ON(!entry->domain);
2062 WARN_ON(domain->dev_cnt != 0);
2065 static void protection_domain_free(struct protection_domain *domain)
2070 if (domain->iop.pgtbl_cfg.tlb)
2071 free_io_pgtable_ops(&domain->iop.iop.ops);
2073 if (domain->flags & PD_IOMMUV2_MASK)
2074 free_gcr3_table(domain);
2076 if (domain->iop.root)
2077 free_page((unsigned long)domain->iop.root);
2080 domain_id_free(domain->id);
2085 static int protection_domain_init_v1(struct protection_domain *domain, int mode)
2087 u64 *pt_root = NULL;
2089 BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
2091 if (mode != PAGE_MODE_NONE) {
2092 pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2097 amd_iommu_domain_set_pgtable(domain, pt_root, mode);
2102 static int protection_domain_init_v2(struct protection_domain *domain)
2104 domain->flags |= PD_GIOV_MASK;
2106 domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
2108 if (setup_gcr3_table(domain, 1))
2114 static struct protection_domain *protection_domain_alloc(unsigned int type)
2116 struct io_pgtable_ops *pgtbl_ops;
2117 struct protection_domain *domain;
2121 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2125 domain->id = domain_id_alloc();
2129 spin_lock_init(&domain->lock);
2130 INIT_LIST_HEAD(&domain->dev_list);
2131 domain->nid = NUMA_NO_NODE;
2134 /* No need to allocate io pgtable ops in passthrough mode */
2135 case IOMMU_DOMAIN_IDENTITY:
2137 case IOMMU_DOMAIN_DMA:
2138 pgtable = amd_iommu_pgtable;
2141 * Force IOMMU v1 page table when allocating
2142 * domain for pass-through devices.
2144 case IOMMU_DOMAIN_UNMANAGED:
2145 pgtable = AMD_IOMMU_V1;
2153 ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
2156 ret = protection_domain_init_v2(domain);
2166 pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
2172 protection_domain_free(domain);
2176 static inline u64 dma_max_address(void)
2178 if (amd_iommu_pgtable == AMD_IOMMU_V1)
2181 /* V2 with 4/5 level page table */
2182 return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
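/*
 * Example (assuming PM_LEVEL_SHIFT(lvl) == 12 + 9 * lvl): a 4-level V2 guest
 * page table yields an aperture end of (1ULL << 48) - 1, a 5-level table
 * (1ULL << 57) - 1.
 */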
2185 static bool amd_iommu_hd_support(struct amd_iommu *iommu)
2187 return iommu && (iommu->features & FEATURE_HDSUP);
2190 static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
2191 struct device *dev, u32 flags)
2193 bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
2194 struct protection_domain *domain;
2195 struct amd_iommu *iommu = NULL;
2198 iommu = rlookup_amd_iommu(dev);
2200 return ERR_PTR(-ENODEV);
2204 * Since DTE[Mode]=0 is prohibited on SNP-enabled systems,
2205 * default to use IOMMU_DOMAIN_DMA[_FQ].
2207 if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
2208 return ERR_PTR(-EINVAL);
2210 if (dirty_tracking && !amd_iommu_hd_support(iommu))
2211 return ERR_PTR(-EOPNOTSUPP);
2213 domain = protection_domain_alloc(type);
2215 return ERR_PTR(-ENOMEM);
2217 domain->domain.geometry.aperture_start = 0;
2218 domain->domain.geometry.aperture_end = dma_max_address();
2219 domain->domain.geometry.force_aperture = true;
2222 domain->domain.type = type;
2223 domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap;
2224 domain->domain.ops = iommu->iommu.ops->default_domain_ops;
2227 domain->domain.dirty_ops = &amd_dirty_ops;
2230 return &domain->domain;
2233 static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
2235 struct iommu_domain *domain;
2237 domain = do_iommu_domain_alloc(type, NULL, 0);
2244 static struct iommu_domain *
2245 amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
2246 struct iommu_domain *parent,
2247 const struct iommu_user_data *user_data)
2250 unsigned int type = IOMMU_DOMAIN_UNMANAGED;
2252 if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
2253 return ERR_PTR(-EOPNOTSUPP);
2255 return do_iommu_domain_alloc(type, dev, flags);
2258 static void amd_iommu_domain_free(struct iommu_domain *dom)
2260 struct protection_domain *domain;
2261 unsigned long flags;
2266 domain = to_pdomain(dom);
2268 spin_lock_irqsave(&domain->lock, flags);
2270 cleanup_domain(domain);
2272 spin_unlock_irqrestore(&domain->lock, flags);
2274 protection_domain_free(domain);
2277 static int amd_iommu_attach_device(struct iommu_domain *dom,
2280 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2281 struct protection_domain *domain = to_pdomain(dom);
2282 struct amd_iommu *iommu = rlookup_amd_iommu(dev);
2286 * Skip attaching the device to the domain if the new domain
2287 * is the same as the device's current domain.
2289 if (dev_data->domain == domain)
2292 dev_data->defer_attach = false;
2295 * Restrict to devices with compatible IOMMU hardware support
2296 * when enforcement of dirty tracking is enabled.
2298 if (dom->dirty_ops && !amd_iommu_hd_support(iommu))
2301 if (dev_data->domain)
2304 ret = attach_device(dev, domain);
2306 #ifdef CONFIG_IRQ_REMAP
2307 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
2308 if (dom->type == IOMMU_DOMAIN_UNMANAGED)
2309 dev_data->use_vapic = 1;
2311 dev_data->use_vapic = 0;
2315 iommu_completion_wait(iommu);
2320 static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
2321 unsigned long iova, size_t size)
2323 struct protection_domain *domain = to_pdomain(dom);
2324 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2327 domain_flush_np_cache(domain, iova, size);
2331 static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
2332 phys_addr_t paddr, size_t pgsize, size_t pgcount,
2333 int iommu_prot, gfp_t gfp, size_t *mapped)
2335 struct protection_domain *domain = to_pdomain(dom);
2336 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2340 if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
2341 (domain->iop.mode == PAGE_MODE_NONE))
2344 if (iommu_prot & IOMMU_READ)
2345 prot |= IOMMU_PROT_IR;
2346 if (iommu_prot & IOMMU_WRITE)
2347 prot |= IOMMU_PROT_IW;
2349 if (ops->map_pages) {
2350 ret = ops->map_pages(ops, iova, paddr, pgsize,
2351 pgcount, prot, gfp, mapped);
2357 static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
2358 struct iommu_iotlb_gather *gather,
2359 unsigned long iova, size_t size)
2362 * AMD's IOMMU can flush as many pages as necessary in a single flush.
2363 * Unless we run in a virtual machine, which can be inferred according
2364 * to whether "non-present cache" is on, it is probably best to prefer
2365 * (potentially) too extensive TLB flushing (i.e., more misses) over
2366 * multiple TLB flushes (i.e., more flushes). For virtual machines the
2367 * hypervisor needs to synchronize the host IOMMU PTEs with those of
2368 * the guest, and the trade-off is different: unnecessary TLB flushes
2369 * should be avoided.
2371 if (amd_iommu_np_cache &&
2372 iommu_iotlb_gather_is_disjoint(gather, iova, size))
2373 iommu_iotlb_sync(domain, gather);
2375 iommu_iotlb_gather_add_range(gather, iova, size);
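/*
 * Example: gathering [0x1000, 0x1fff] and then [0x100000, 0x100fff] on an
 * np_cache (virtualized) setup finds the two ranges disjoint, so the first
 * one is flushed before the new range is gathered. On bare metal both are
 * merged and flushed later as one wide range by amd_iommu_iotlb_sync().
 */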
2378 static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
2379 size_t pgsize, size_t pgcount,
2380 struct iommu_iotlb_gather *gather)
2382 struct protection_domain *domain = to_pdomain(dom);
2383 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2386 if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
2387 (domain->iop.mode == PAGE_MODE_NONE))
2390 r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
2393 amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
2398 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
2401 struct protection_domain *domain = to_pdomain(dom);
2402 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2404 return ops->iova_to_phys(ops, iova);
2407 static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
2410 case IOMMU_CAP_CACHE_COHERENCY:
2412 case IOMMU_CAP_NOEXEC:
2414 case IOMMU_CAP_PRE_BOOT_PROTECTION:
2415 return amdr_ivrs_remap_support;
2416 case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
2418 case IOMMU_CAP_DEFERRED_FLUSH:
2420 case IOMMU_CAP_DIRTY_TRACKING: {
2421 struct amd_iommu *iommu = rlookup_amd_iommu(dev);
2423 return amd_iommu_hd_support(iommu);
2432 static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
2435 struct protection_domain *pdomain = to_pdomain(domain);
2436 struct dev_table_entry *dev_table;
2437 struct iommu_dev_data *dev_data;
2438 bool domain_flush = false;
2439 struct amd_iommu *iommu;
2440 unsigned long flags;
2443 spin_lock_irqsave(&pdomain->lock, flags);
2444 if (!(pdomain->dirty_tracking ^ enable)) {
2445 spin_unlock_irqrestore(&pdomain->lock, flags);
2449 list_for_each_entry(dev_data, &pdomain->dev_list, list) {
2450 iommu = rlookup_amd_iommu(dev_data->dev);
2454 dev_table = get_dev_table(iommu);
2455 pte_root = dev_table[dev_data->devid].data[0];
2457 pte_root = (enable ? pte_root | DTE_FLAG_HAD :
2458 pte_root & ~DTE_FLAG_HAD);
2460 /* Flush device DTE */
2461 dev_table[dev_data->devid].data[0] = pte_root;
2462 device_flush_dte(dev_data);
2463 domain_flush = true;
2466 /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
2468 amd_iommu_domain_flush_all(pdomain);
2470 pdomain->dirty_tracking = enable;
2471 spin_unlock_irqrestore(&pdomain->lock, flags);
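/*
 * Read (and clear) the dirty status of IOPTEs in the given IOVA range
 * via the io-pgtable ops. Rejects the request if the page table does not
 * implement read_and_clear_dirty, or if a bitmap is passed while dirty
 * tracking is not enabled on the domain.
 */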
2476 static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
2477 unsigned long iova, size_t size,
2478 unsigned long flags,
2479 struct iommu_dirty_bitmap *dirty)
2481 struct protection_domain *pdomain = to_pdomain(domain);
2482 struct io_pgtable_ops *ops = &pdomain->iop.iop.ops;
2483 unsigned long lflags;
2485 if (!ops || !ops->read_and_clear_dirty)
2488 spin_lock_irqsave(&pdomain->lock, lflags);
2489 if (!pdomain->dirty_tracking && dirty->bitmap) {
2490 spin_unlock_irqrestore(&pdomain->lock, lflags);
2493 spin_unlock_irqrestore(&pdomain->lock, lflags);
2495 return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
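/*
 * Report reserved IOVA regions for a device: unity-map and exclusion
 * ranges from the IVRS/IVMD firmware tables for its PCI segment, plus
 * the MSI window and the HyperTransport address range.
 */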
2498 static void amd_iommu_get_resv_regions(struct device *dev,
2499 struct list_head *head)
2501 struct iommu_resv_region *region;
2502 struct unity_map_entry *entry;
2503 struct amd_iommu *iommu;
2504 struct amd_iommu_pci_seg *pci_seg;
2507 sbdf = get_device_sbdf_id(dev);
2511 devid = PCI_SBDF_TO_DEVID(sbdf);
2512 iommu = rlookup_amd_iommu(dev);
2515 pci_seg = iommu->pci_seg;
2517 list_for_each_entry(entry, &pci_seg->unity_map, list) {
2521 if (devid < entry->devid_start || devid > entry->devid_end)
2524 type = IOMMU_RESV_DIRECT;
2525 length = entry->address_end - entry->address_start;
2526 if (entry->prot & IOMMU_PROT_IR)
2528 if (entry->prot & IOMMU_PROT_IW)
2529 prot |= IOMMU_WRITE;
2530 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
2531 /* Exclusion range */
2532 type = IOMMU_RESV_RESERVED;
2534 region = iommu_alloc_resv_region(entry->address_start,
2538 dev_err(dev, "Out of memory allocating dm-regions\n");
2541		list_add_tail(&region->list, head);
2544 region = iommu_alloc_resv_region(MSI_RANGE_START,
2545 MSI_RANGE_END - MSI_RANGE_START + 1,
2546 0, IOMMU_RESV_MSI, GFP_KERNEL);
2549	list_add_tail(&region->list, head);
2551 region = iommu_alloc_resv_region(HT_RANGE_START,
2552 HT_RANGE_END - HT_RANGE_START + 1,
2553 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
2556	list_add_tail(&region->list, head);
2559 bool amd_iommu_is_attach_deferred(struct device *dev)
2561 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2563 return dev_data->defer_attach;
2566 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
2568 struct protection_domain *dom = to_pdomain(domain);
2569 unsigned long flags;
2571 spin_lock_irqsave(&dom->lock, flags);
2572 amd_iommu_domain_flush_all(dom);
2573 spin_unlock_irqrestore(&dom->lock, flags);
2576 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
2577 struct iommu_iotlb_gather *gather)
2579 struct protection_domain *dom = to_pdomain(domain);
2580 unsigned long flags;
2582 spin_lock_irqsave(&dom->lock, flags);
2583 amd_iommu_domain_flush_pages(dom, gather->start,
2584 gather->end - gather->start + 1);
2585 spin_unlock_irqrestore(&dom->lock, flags);
2588 static int amd_iommu_def_domain_type(struct device *dev)
2590 struct iommu_dev_data *dev_data;
2592 dev_data = dev_iommu_priv_get(dev);
2597 * Do not identity map IOMMUv2 capable devices when:
2598 * - memory encryption is active, because some of those devices
2599 * (AMD GPUs) don't have the encryption bit in their DMA-mask
2600 * and require remapping.
2601 * - SNP is enabled, because it prohibits DTE[Mode]=0.
2603 if (pdev_pasid_supported(dev_data) &&
2604 !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
2605 !amd_iommu_snp_en) {
2606 return IOMMU_DOMAIN_IDENTITY;
2612 static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
2614 /* IOMMU_PTE_FC is always set */
2618 static const struct iommu_dirty_ops amd_dirty_ops = {
2619 .set_dirty_tracking = amd_iommu_set_dirty_tracking,
2620 .read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
2623 const struct iommu_ops amd_iommu_ops = {
2624 .capable = amd_iommu_capable,
2625 .domain_alloc = amd_iommu_domain_alloc,
2626 .domain_alloc_user = amd_iommu_domain_alloc_user,
2627 .probe_device = amd_iommu_probe_device,
2628 .release_device = amd_iommu_release_device,
2629 .probe_finalize = amd_iommu_probe_finalize,
2630 .device_group = amd_iommu_device_group,
2631 .get_resv_regions = amd_iommu_get_resv_regions,
2632 .is_attach_deferred = amd_iommu_is_attach_deferred,
2633 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
2634 .def_domain_type = amd_iommu_def_domain_type,
2635 .default_domain_ops = &(const struct iommu_domain_ops) {
2636 .attach_dev = amd_iommu_attach_device,
2637 .map_pages = amd_iommu_map_pages,
2638 .unmap_pages = amd_iommu_unmap_pages,
2639 .iotlb_sync_map = amd_iommu_iotlb_sync_map,
2640 .iova_to_phys = amd_iommu_iova_to_phys,
2641 .flush_iotlb_all = amd_iommu_flush_iotlb_all,
2642 .iotlb_sync = amd_iommu_iotlb_sync,
2643 .free = amd_iommu_domain_free,
2644 .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
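/*
 * Flush an address range for a specific PASID: first invalidate the
 * IOMMU TLB on every IOMMU serving the domain, wait for completion,
 * then invalidate the IOTLB of each ATS-enabled device in the domain.
 * Only valid for v2 (PASID-capable) protection domains.
 */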
2648 static int __flush_pasid(struct protection_domain *domain, u32 pasid,
2649 u64 address, size_t size)
2651 struct iommu_dev_data *dev_data;
2652 struct iommu_cmd cmd;
2655 if (!(domain->flags & PD_IOMMUV2_MASK))
2658 build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
2661	 * The IOMMU TLB needs to be flushed before the device TLB to
2662	 * prevent the device TLB from being refilled from stale IOMMU TLB entries.
2664 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
2665 if (domain->dev_iommu[i] == 0)
2668 ret = iommu_queue_command(amd_iommus[i], &cmd);
2673 /* Wait until IOMMU TLB flushes are complete */
2674 amd_iommu_domain_flush_complete(domain);
2676 /* Now flush device TLBs */
2677 list_for_each_entry(dev_data, &domain->dev_list, list) {
2678 struct amd_iommu *iommu;
2682		 * There might be non-IOMMUv2 capable devices in an IOMMUv2 domain.
2685 if (!dev_data->ats_enabled)
2688 qdep = dev_data->ats_qdep;
2689 iommu = rlookup_amd_iommu(dev_data->dev);
2692 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
2693 address, size, pasid, true);
2695 ret = iommu_queue_command(iommu, &cmd);
2700 /* Wait until all device TLBs are flushed */
2701 amd_iommu_domain_flush_complete(domain);
2710 static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
2713 return __flush_pasid(domain, pasid, address, PAGE_SIZE);
2716 int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
2719 struct protection_domain *domain = to_pdomain(dom);
2720 unsigned long flags;
2723 spin_lock_irqsave(&domain->lock, flags);
2724 ret = __amd_iommu_flush_page(domain, pasid, address);
2725 spin_unlock_irqrestore(&domain->lock, flags);
2730 static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
2732 return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
2735 int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
2737 struct protection_domain *domain = to_pdomain(dom);
2738 unsigned long flags;
2741 spin_lock_irqsave(&domain->lock, flags);
2742 ret = __amd_iommu_flush_tlb(domain, pasid);
2743 spin_unlock_irqrestore(&domain->lock, flags);
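/*
 * Walk the guest CR3 table for a PASID, optionally allocating missing
 * levels. Each level is indexed by a 9-bit slice of the PASID, and a
 * valid entry stores the physical address of the next-level table.
 */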
2748 static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
2755 index = (pasid >> (9 * level)) & 0x1ff;
2761 if (!(*pte & GCR3_VALID)) {
2765 root = (void *)get_zeroed_page(GFP_ATOMIC);
2769 *pte = iommu_virt_to_phys(root) | GCR3_VALID;
2772 root = iommu_phys_to_virt(*pte & PAGE_MASK);
2780 static int __set_gcr3(struct protection_domain *domain, u32 pasid,
2785 if (domain->iop.mode != PAGE_MODE_NONE)
2788 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
2792 *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
2794 return __amd_iommu_flush_tlb(domain, pasid);
2797 static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
2801 if (domain->iop.mode != PAGE_MODE_NONE)
2804 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
2810 return __amd_iommu_flush_tlb(domain, pasid);
2813 int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
2816 struct protection_domain *domain = to_pdomain(dom);
2817 unsigned long flags;
2820 spin_lock_irqsave(&domain->lock, flags);
2821 ret = __set_gcr3(domain, pasid, cr3);
2822 spin_unlock_irqrestore(&domain->lock, flags);
2827 int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
2829 struct protection_domain *domain = to_pdomain(dom);
2830 unsigned long flags;
2833 spin_lock_irqsave(&domain->lock, flags);
2834 ret = __clear_gcr3(domain, pasid);
2835 spin_unlock_irqrestore(&domain->lock, flags);
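/*
 * Send a COMPLETE_PPR command to report the handling status of a
 * peripheral page request (PPR) back to the device identified by
 * pdev/pasid, using the given response tag.
 */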
2840 int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
2841 int status, int tag)
2843 struct iommu_dev_data *dev_data;
2844 struct amd_iommu *iommu;
2845 struct iommu_cmd cmd;
2847 dev_data = dev_iommu_priv_get(&pdev->dev);
2848 iommu = rlookup_amd_iommu(&pdev->dev);
2852 build_complete_ppr(&cmd, dev_data->devid, pasid, status,
2853 tag, dev_data->pri_tlp);
2855 return iommu_queue_command(iommu, &cmd);
2858 #ifdef CONFIG_IRQ_REMAP
2860 /*****************************************************************************
2862 * Interrupt Remapping Implementation
2864 *****************************************************************************/
2866 static struct irq_chip amd_ir_chip;
2867 static DEFINE_SPINLOCK(iommu_table_lock);
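/*
 * Invalidate the IOMMU's cached interrupt remapping table entries for a
 * device and synchronously wait for completion. This is a no-op when IRT
 * caching is disabled (irtcachedis), since no stale entries can exist.
 */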
2869 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
2873 unsigned long flags;
2874 struct iommu_cmd cmd, cmd2;
2876 if (iommu->irtcachedis_enabled)
2879 build_inv_irt(&cmd, devid);
2880 data = atomic64_add_return(1, &iommu->cmd_sem_val);
2881 build_completion_wait(&cmd2, iommu, data);
2883 raw_spin_lock_irqsave(&iommu->lock, flags);
2884 ret = __iommu_queue_command_sync(iommu, &cmd, true);
2887 ret = __iommu_queue_command_sync(iommu, &cmd2, false);
2890 wait_on_sem(iommu, data);
2892 raw_spin_unlock_irqrestore(&iommu->lock, flags);
2895 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
2896 struct irq_remap_table *table)
2899 struct dev_table_entry *dev_table = get_dev_table(iommu);
2901 dte = dev_table[devid].data[2];
2902 dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
2903 dte |= iommu_virt_to_phys(table->table);
2904 dte |= DTE_IRQ_REMAP_INTCTL;
2905 dte |= DTE_INTTABLEN;
2906 dte |= DTE_IRQ_REMAP_ENABLE;
2908 dev_table[devid].data[2] = dte;
2911 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
2913 struct irq_remap_table *table;
2914 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
2916 if (WARN_ONCE(!pci_seg->rlookup_table[devid],
2917 "%s: no iommu for devid %x:%x\n",
2918 __func__, pci_seg->id, devid))
2921 table = pci_seg->irq_lookup_table[devid];
2922 if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
2923 __func__, pci_seg->id, devid))
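/*
 * Allocate a new interrupt remapping table from the IRQ table cache.
 * Entries are 128-bit irte_ga structures when guest virtual APIC (GA)
 * mode is enabled and 32-bit irte entries otherwise, so the table is
 * zeroed for the corresponding entry size.
 */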
2929 static struct irq_remap_table *__alloc_irq_table(void)
2931 struct irq_remap_table *table;
2933 table = kzalloc(sizeof(*table), GFP_KERNEL);
2937 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
2938 if (!table->table) {
2942 raw_spin_lock_init(&table->lock);
2944 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2945 memset(table->table, 0,
2946 MAX_IRQS_PER_TABLE * sizeof(u32));
2948 memset(table->table, 0,
2949 (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
2953 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
2954 struct irq_remap_table *table)
2956 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
2958 pci_seg->irq_lookup_table[devid] = table;
2959 set_dte_irq_entry(iommu, devid, table);
2960 iommu_flush_dte(iommu, devid);
2963 static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
2966 struct irq_remap_table *table = data;
2967 struct amd_iommu_pci_seg *pci_seg;
2968 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
2973 pci_seg = iommu->pci_seg;
2974 pci_seg->irq_lookup_table[alias] = table;
2975 set_dte_irq_entry(iommu, alias, table);
2976 iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
2981 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
2982 u16 devid, struct pci_dev *pdev)
2984 struct irq_remap_table *table = NULL;
2985 struct irq_remap_table *new_table = NULL;
2986 struct amd_iommu_pci_seg *pci_seg;
2987 unsigned long flags;
2990 spin_lock_irqsave(&iommu_table_lock, flags);
2992 pci_seg = iommu->pci_seg;
2993 table = pci_seg->irq_lookup_table[devid];
2997 alias = pci_seg->alias_table[devid];
2998 table = pci_seg->irq_lookup_table[alias];
3000 set_remap_table_entry(iommu, devid, table);
3003 spin_unlock_irqrestore(&iommu_table_lock, flags);
3005 /* Nothing there yet, allocate new irq remapping table */
3006 new_table = __alloc_irq_table();
3010 spin_lock_irqsave(&iommu_table_lock, flags);
3012 table = pci_seg->irq_lookup_table[devid];
3016 table = pci_seg->irq_lookup_table[alias];
3018 set_remap_table_entry(iommu, devid, table);
3026 pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
3029 set_remap_table_entry(iommu, devid, table);
3032 set_remap_table_entry(iommu, alias, table);
3035 iommu_completion_wait(iommu);
3038 spin_unlock_irqrestore(&iommu_table_lock, flags);
3041 kmem_cache_free(amd_iommu_irq_cache, new_table->table);
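/*
 * Reserve 'count' consecutive free entries in the device's interrupt
 * remapping table. When 'align' is set (PCI multi-MSI), the starting
 * index is aligned to the next power of two of 'count', as multi-message
 * MSI requires a naturally aligned block of vectors.
 */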
3047 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
3048 bool align, struct pci_dev *pdev)
3050 struct irq_remap_table *table;
3051 int index, c, alignment = 1;
3052 unsigned long flags;
3054 table = alloc_irq_table(iommu, devid, pdev);
3059 alignment = roundup_pow_of_two(count);
3061 raw_spin_lock_irqsave(&table->lock, flags);
3063 /* Scan table for free entries */
3064 for (index = ALIGN(table->min_index, alignment), c = 0;
3065 index < MAX_IRQS_PER_TABLE;) {
3066 if (!iommu->irte_ops->is_allocated(table, index)) {
3070 index = ALIGN(index + 1, alignment);
3076 iommu->irte_ops->set_allocated(table, index - c + 1);
3088 raw_spin_unlock_irqrestore(&table->lock, flags);
3093 static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3094 struct irte_ga *irte)
3096 struct irq_remap_table *table;
3097 struct irte_ga *entry;
3098 unsigned long flags;
3101 table = get_irq_table(iommu, devid);
3105 raw_spin_lock_irqsave(&table->lock, flags);
3107 entry = (struct irte_ga *)table->table;
3108 entry = &entry[index];
3111	 * We use a 128-bit cmpxchg (try_cmpxchg128) to atomically update the IRTE,
3112	 * and it cannot be updated by the hardware or other processors
3113	 * behind us, so the return value of the cmpxchg should be the
3114	 * same as the old value.
3117 WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte));
3119 raw_spin_unlock_irqrestore(&table->lock, flags);
3124 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3125 struct irte_ga *irte)
3129 ret = __modify_irte_ga(iommu, devid, index, irte);
3133 iommu_flush_irt_and_complete(iommu, devid);
3138 static int modify_irte(struct amd_iommu *iommu,
3139 u16 devid, int index, union irte *irte)
3141 struct irq_remap_table *table;
3142 unsigned long flags;
3144 table = get_irq_table(iommu, devid);
3148 raw_spin_lock_irqsave(&table->lock, flags);
3149 table->table[index] = irte->val;
3150 raw_spin_unlock_irqrestore(&table->lock, flags);
3152 iommu_flush_irt_and_complete(iommu, devid);
3157 static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
3159 struct irq_remap_table *table;
3160 unsigned long flags;
3162 table = get_irq_table(iommu, devid);
3166 raw_spin_lock_irqsave(&table->lock, flags);
3167 iommu->irte_ops->clear_allocated(table, index);
3168 raw_spin_unlock_irqrestore(&table->lock, flags);
3170 iommu_flush_irt_and_complete(iommu, devid);
3173 static void irte_prepare(void *entry,
3174 u32 delivery_mode, bool dest_mode,
3175 u8 vector, u32 dest_apicid, int devid)
3177 union irte *irte = (union irte *) entry;
3180 irte->fields.vector = vector;
3181 irte->fields.int_type = delivery_mode;
3182 irte->fields.destination = dest_apicid;
3183 irte->fields.dm = dest_mode;
3184 irte->fields.valid = 1;
3187 static void irte_ga_prepare(void *entry,
3188 u32 delivery_mode, bool dest_mode,
3189 u8 vector, u32 dest_apicid, int devid)
3191 struct irte_ga *irte = (struct irte_ga *) entry;
3195 irte->lo.fields_remap.int_type = delivery_mode;
3196 irte->lo.fields_remap.dm = dest_mode;
3197 irte->hi.fields.vector = vector;
3198 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
3199 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid);
3200 irte->lo.fields_remap.valid = 1;
3203 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3205 union irte *irte = (union irte *) entry;
3207 irte->fields.valid = 1;
3208 modify_irte(iommu, devid, index, irte);
3211 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3213 struct irte_ga *irte = (struct irte_ga *) entry;
3215 irte->lo.fields_remap.valid = 1;
3216 modify_irte_ga(iommu, devid, index, irte);
3219 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3221 union irte *irte = (union irte *) entry;
3223 irte->fields.valid = 0;
3224 modify_irte(iommu, devid, index, irte);
3227 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3229 struct irte_ga *irte = (struct irte_ga *) entry;
3231 irte->lo.fields_remap.valid = 0;
3232 modify_irte_ga(iommu, devid, index, irte);
3235 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3236 u8 vector, u32 dest_apicid)
3238 union irte *irte = (union irte *) entry;
3240 irte->fields.vector = vector;
3241 irte->fields.destination = dest_apicid;
3242 modify_irte(iommu, devid, index, irte);
3245 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3246 u8 vector, u32 dest_apicid)
3248 struct irte_ga *irte = (struct irte_ga *) entry;
3250 if (!irte->lo.fields_remap.guest_mode) {
3251 irte->hi.fields.vector = vector;
3252 irte->lo.fields_remap.destination =
3253 APICID_TO_IRTE_DEST_LO(dest_apicid);
3254 irte->hi.fields.destination =
3255 APICID_TO_IRTE_DEST_HI(dest_apicid);
3256 modify_irte_ga(iommu, devid, index, irte);
3260 #define IRTE_ALLOCATED (~1U)
3261 static void irte_set_allocated(struct irq_remap_table *table, int index)
3263 table->table[index] = IRTE_ALLOCATED;
3266 static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3268 struct irte_ga *ptr = (struct irte_ga *)table->table;
3269 struct irte_ga *irte = &ptr[index];
3271 memset(&irte->lo.val, 0, sizeof(u64));
3272 memset(&irte->hi.val, 0, sizeof(u64));
3273 irte->hi.fields.vector = 0xff;
3276 static bool irte_is_allocated(struct irq_remap_table *table, int index)
3278 union irte *ptr = (union irte *)table->table;
3279 union irte *irte = &ptr[index];
3281 return irte->val != 0;
3284 static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3286 struct irte_ga *ptr = (struct irte_ga *)table->table;
3287 struct irte_ga *irte = &ptr[index];
3289 return irte->hi.fields.vector != 0;
3292 static void irte_clear_allocated(struct irq_remap_table *table, int index)
3294 table->table[index] = 0;
3297 static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3299 struct irte_ga *ptr = (struct irte_ga *)table->table;
3300 struct irte_ga *irte = &ptr[index];
3302 memset(&irte->lo.val, 0, sizeof(u64));
3303 memset(&irte->hi.val, 0, sizeof(u64));
3306 static int get_devid(struct irq_alloc_info *info)
3308 switch (info->type) {
3309 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3310 return get_ioapic_devid(info->devid);
3311 case X86_IRQ_ALLOC_TYPE_HPET:
3312 return get_hpet_devid(info->devid);
3313 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3314 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3315 return get_device_sbdf_id(msi_desc_to_dev(info->desc));
3322 struct irq_remap_ops amd_iommu_irq_ops = {
3323 .prepare = amd_iommu_prepare,
3324 .enable = amd_iommu_enable,
3325 .disable = amd_iommu_disable,
3326 .reenable = amd_iommu_reenable,
3327 .enable_faulting = amd_iommu_enable_faulting,
3330 static void fill_msi_msg(struct msi_msg *msg, u32 index)
3333 msg->address_lo = 0;
3334 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
3335 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
3338 static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3339 struct irq_cfg *irq_cfg,
3340 struct irq_alloc_info *info,
3341 int devid, int index, int sub_handle)
3343 struct irq_2_irte *irte_info = &data->irq_2_irte;
3344 struct amd_iommu *iommu = data->iommu;
3349 data->irq_2_irte.devid = devid;
3350 data->irq_2_irte.index = index + sub_handle;
3351 iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED,
3352 apic->dest_mode_logical, irq_cfg->vector,
3353 irq_cfg->dest_apicid, devid);
3355 switch (info->type) {
3356 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3357 case X86_IRQ_ALLOC_TYPE_HPET:
3358 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3359 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3360 fill_msi_msg(&data->msi_entry, irte_info->index);
3369 struct amd_irte_ops irte_32_ops = {
3370 .prepare = irte_prepare,
3371 .activate = irte_activate,
3372 .deactivate = irte_deactivate,
3373 .set_affinity = irte_set_affinity,
3374 .set_allocated = irte_set_allocated,
3375 .is_allocated = irte_is_allocated,
3376 .clear_allocated = irte_clear_allocated,
3379 struct amd_irte_ops irte_128_ops = {
3380 .prepare = irte_ga_prepare,
3381 .activate = irte_ga_activate,
3382 .deactivate = irte_ga_deactivate,
3383 .set_affinity = irte_ga_set_affinity,
3384 .set_allocated = irte_ga_set_allocated,
3385 .is_allocated = irte_ga_is_allocated,
3386 .clear_allocated = irte_ga_clear_allocated,
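/*
 * irq_domain alloc callback: look up the IOMMU responsible for the
 * requesting device, reserve IRTE slots (the first 32 entries of an
 * IOAPIC table are pre-allocated so the pin number can index directly),
 * and set up per-IRQ remapping data and chip for each allocated vector.
 */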
3389 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3390 unsigned int nr_irqs, void *arg)
3392 struct irq_alloc_info *info = arg;
3393 struct irq_data *irq_data;
3394 struct amd_ir_data *data = NULL;
3395 struct amd_iommu *iommu;
3396 struct irq_cfg *cfg;
3397 int i, ret, devid, seg, sbdf;
3402 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
3405 sbdf = get_devid(info);
3409 seg = PCI_SBDF_TO_SEGID(sbdf);
3410 devid = PCI_SBDF_TO_DEVID(sbdf);
3411 iommu = __rlookup_amd_iommu(seg, devid);
3415 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
3419 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
3420 struct irq_remap_table *table;
3422 table = alloc_irq_table(iommu, devid, NULL);
3424 if (!table->min_index) {
3426			 * Keep the first 32 indexes free for IOAPIC interrupts.
3429 table->min_index = 32;
3430 for (i = 0; i < 32; ++i)
3431 iommu->irte_ops->set_allocated(table, i);
3433 WARN_ON(table->min_index != 32);
3434 index = info->ioapic.pin;
3438 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
3439 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
3440 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
3442 index = alloc_irq_index(iommu, devid, nr_irqs, align,
3443 msi_desc_to_pci_dev(info->desc));
3445 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
3449 pr_warn("Failed to allocate IRTE\n");
3451 goto out_free_parent;
3454 for (i = 0; i < nr_irqs; i++) {
3455 irq_data = irq_domain_get_irq_data(domain, virq + i);
3456 cfg = irq_data ? irqd_cfg(irq_data) : NULL;
3463 data = kzalloc(sizeof(*data), GFP_KERNEL);
3467 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3468 data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
3470 data->entry = kzalloc(sizeof(struct irte_ga),
3477 data->iommu = iommu;
3478 irq_data->hwirq = (devid << 16) + i;
3479 irq_data->chip_data = data;
3480 irq_data->chip = &amd_ir_chip;
3481 irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3482 irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
3488 for (i--; i >= 0; i--) {
3489 irq_data = irq_domain_get_irq_data(domain, virq + i);
3491 kfree(irq_data->chip_data);
3493 for (i = 0; i < nr_irqs; i++)
3494 free_irte(iommu, devid, index + i);
3496 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3500 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
3501 unsigned int nr_irqs)
3503 struct irq_2_irte *irte_info;
3504 struct irq_data *irq_data;
3505 struct amd_ir_data *data;
3508 for (i = 0; i < nr_irqs; i++) {
3509 irq_data = irq_domain_get_irq_data(domain, virq + i);
3510 if (irq_data && irq_data->chip_data) {
3511 data = irq_data->chip_data;
3512 irte_info = &data->irq_2_irte;
3513 free_irte(data->iommu, irte_info->devid, irte_info->index);
3518 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3521 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3522 struct amd_ir_data *ir_data,
3523 struct irq_2_irte *irte_info,
3524 struct irq_cfg *cfg);
3526 static int irq_remapping_activate(struct irq_domain *domain,
3527 struct irq_data *irq_data, bool reserve)
3529 struct amd_ir_data *data = irq_data->chip_data;
3530 struct irq_2_irte *irte_info = &data->irq_2_irte;
3531 struct amd_iommu *iommu = data->iommu;
3532 struct irq_cfg *cfg = irqd_cfg(irq_data);
3537 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
3539 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
3543 static void irq_remapping_deactivate(struct irq_domain *domain,
3544 struct irq_data *irq_data)
3546 struct amd_ir_data *data = irq_data->chip_data;
3547 struct irq_2_irte *irte_info = &data->irq_2_irte;
3548 struct amd_iommu *iommu = data->iommu;
3551 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
3555 static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
3556 enum irq_domain_bus_token bus_token)
3558 struct amd_iommu *iommu;
3561 if (!amd_iommu_irq_remap)
3564 if (x86_fwspec_is_ioapic(fwspec))
3565 devid = get_ioapic_devid(fwspec->param[0]);
3566 else if (x86_fwspec_is_hpet(fwspec))
3567 devid = get_hpet_devid(fwspec->param[0]);
3571 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
3573 return iommu && iommu->ir_domain == d;
3576 static const struct irq_domain_ops amd_ir_domain_ops = {
3577 .select = irq_remapping_select,
3578 .alloc = irq_remapping_alloc,
3579 .free = irq_remapping_free,
3580 .activate = irq_remapping_activate,
3581 .deactivate = irq_remapping_deactivate,
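/*
 * Switch an IRTE into guest (vAPIC) mode so the interrupt is posted to
 * the guest: program the GA root pointer, guest vector and GA tag from
 * ir_data and enable GA-log interrupt generation, preserving the
 * entry's valid bit.
 */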
3584 int amd_iommu_activate_guest_mode(void *data)
3586 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3587 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3590 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
3593 valid = entry->lo.fields_vapic.valid;
3598 entry->lo.fields_vapic.valid = valid;
3599 entry->lo.fields_vapic.guest_mode = 1;
3600 entry->lo.fields_vapic.ga_log_intr = 1;
3601 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
3602 entry->hi.fields.vector = ir_data->ga_vector;
3603 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
3605 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3606 ir_data->irq_2_irte.index, entry);
3608 EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
3610 int amd_iommu_deactivate_guest_mode(void *data)
3612 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3613 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3614 struct irq_cfg *cfg = ir_data->cfg;
3617 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3618 !entry || !entry->lo.fields_vapic.guest_mode)
3621 valid = entry->lo.fields_remap.valid;
3626 entry->lo.fields_remap.valid = valid;
3627 entry->lo.fields_remap.dm = apic->dest_mode_logical;
3628 entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED;
3629 entry->hi.fields.vector = cfg->vector;
3630 entry->lo.fields_remap.destination =
3631 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
3632 entry->hi.fields.destination =
3633 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
3635 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3636 ir_data->irq_2_irte.index, entry);
3638 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
3640 static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
3643 struct amd_iommu_pi_data *pi_data = vcpu_info;
3644 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
3645 struct amd_ir_data *ir_data = data->chip_data;
3646 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
3647 struct iommu_dev_data *dev_data;
3649 if (ir_data->iommu == NULL)
3652 dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
3655	 * This device has never been set up for guest mode,
3656	 * so we should not modify the IRTE.
3658 if (!dev_data || !dev_data->use_vapic)
3661 ir_data->cfg = irqd_cfg(data);
3662 pi_data->ir_data = ir_data;
3665	 * SVM tries to set up guest (vAPIC) mode, but the IOMMU is running
3666	 * in legacy interrupt remapping mode, so force legacy mode instead.
3668 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
3669 pr_debug("%s: Fall back to using intr legacy remap\n",
3671 pi_data->is_guest_mode = false;
3674 pi_data->prev_ga_tag = ir_data->cached_ga_tag;
3675 if (pi_data->is_guest_mode) {
3676 ir_data->ga_root_ptr = (pi_data->base >> 12);
3677 ir_data->ga_vector = vcpu_pi_info->vector;
3678 ir_data->ga_tag = pi_data->ga_tag;
3679 ret = amd_iommu_activate_guest_mode(ir_data);
3681 ir_data->cached_ga_tag = pi_data->ga_tag;
3683 ret = amd_iommu_deactivate_guest_mode(ir_data);
3686 * This communicates the ga_tag back to the caller
3687 * so that it can do all the necessary clean up.
3690 ir_data->cached_ga_tag = 0;
3697 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3698 struct amd_ir_data *ir_data,
3699 struct irq_2_irte *irte_info,
3700 struct irq_cfg *cfg)
3704	 * Atomically updates the IRTE with the new destination and vector,
3705	 * then flushes the interrupt entry cache.
3707 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
3708 irte_info->index, cfg->vector,
3712 static int amd_ir_set_affinity(struct irq_data *data,
3713 const struct cpumask *mask, bool force)
3715 struct amd_ir_data *ir_data = data->chip_data;
3716 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
3717 struct irq_cfg *cfg = irqd_cfg(data);
3718 struct irq_data *parent = data->parent_data;
3719 struct amd_iommu *iommu = ir_data->iommu;
3725 ret = parent->chip->irq_set_affinity(parent, mask, force);
3726 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
3729 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
3731	 * After this point, all the interrupts will start arriving
3732	 * at the new destination, so it is time to clean up the
3733	 * previous vector allocation.
3735 vector_schedule_cleanup(cfg);
3737 return IRQ_SET_MASK_OK_DONE;
3740 static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
3742 struct amd_ir_data *ir_data = irq_data->chip_data;
3744 *msg = ir_data->msi_entry;
3747 static struct irq_chip amd_ir_chip = {
3749 .irq_ack = apic_ack_irq,
3750 .irq_set_affinity = amd_ir_set_affinity,
3751 .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
3752 .irq_compose_msi_msg = ir_compose_msi_msg,
3755 static const struct msi_parent_ops amdvi_msi_parent_ops = {
3756 .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
3757 MSI_FLAG_MULTI_PCI_MSI |
3760 .init_dev_msi_info = msi_parent_init_dev_msi_info,
3763 static const struct msi_parent_ops virt_amdvi_msi_parent_ops = {
3764 .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
3765 MSI_FLAG_MULTI_PCI_MSI,
3767 .init_dev_msi_info = msi_parent_init_dev_msi_info,
3770 int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
3772 struct fwnode_handle *fn;
3774 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
3777 iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0,
3778 fn, &amd_ir_domain_ops, iommu);
3779 if (!iommu->ir_domain) {
3780 irq_domain_free_fwnode(fn);
3784 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI);
3785 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
3786 IRQ_DOMAIN_FLAG_ISOLATED_MSI;
3788 if (amd_iommu_np_cache)
3789 iommu->ir_domain->msi_parent_ops = &virt_amdvi_msi_parent_ops;
3791 iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;
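/*
 * Update the destination CPU and is_run state of a guest-mode IRTE when
 * the target vCPU is scheduled in or out. Uses __modify_irte_ga(), i.e.
 * the variant that does not flush the interrupt remapping table cache.
 */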
3796 int amd_iommu_update_ga(int cpu, bool is_run, void *data)
3798 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3799 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3801 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3802 !entry || !entry->lo.fields_vapic.guest_mode)
3805 if (!ir_data->iommu)
3809 entry->lo.fields_vapic.destination =
3810 APICID_TO_IRTE_DEST_LO(cpu);
3811 entry->hi.fields.destination =
3812 APICID_TO_IRTE_DEST_HI(cpu);
3814 entry->lo.fields_vapic.is_run = is_run;
3816 return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3817 ir_data->irq_2_irte.index, entry);
3819 EXPORT_SYMBOL(amd_iommu_update_ga);