1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2006, Intel Corporation.
5 * Copyright (C) 2006-2008 Intel Corporation
6 * Author: Ashok Raj <ashok.raj@intel.com>
7 * Author: Shaohua Li <shaohua.li@intel.com>
8 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
10 * This file implements early detection/parsing of Remapping Devices
11 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
14 * These routines are used by both DMA-remapping and Interrupt-remapping
17 #define pr_fmt(fmt) "DMAR: " fmt
19 #include <linux/pci.h>
20 #include <linux/dmar.h>
21 #include <linux/iova.h>
22 #include <linux/intel-iommu.h>
23 #include <linux/timer.h>
24 #include <linux/irq.h>
25 #include <linux/interrupt.h>
26 #include <linux/tboot.h>
27 #include <linux/dmi.h>
28 #include <linux/slab.h>
29 #include <linux/iommu.h>
30 #include <linux/numa.h>
31 #include <linux/limits.h>
32 #include <asm/irq_remapping.h>
33 #include <asm/iommu_table.h>
35 #include "../irq_remapping.h"
37 typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
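/*
 * A dmar_res_handler_t is invoked for each DMAR remapping structure of a
 * given type while the table is walked; the void * argument is caller data
 * stashed in dmar_res_callback.arg[] for that type.
 */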
38 struct dmar_res_callback {
39 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
40 void *arg[ACPI_DMAR_TYPE_RESERVED];
41 bool ignore_unhandled;
47 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
48 * before IO devices managed by that unit.
49 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
50 * after IO devices managed by that unit.
51 * 3) Hotplug events are rare.
53 * Locking rules for DMA and interrupt remapping related global data structures:
54 * 1) Use dmar_global_lock in process context
55 * 2) Use RCU in interrupt context
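 * For example, dmar_pci_bus_notifier() takes dmar_global_lock for write
 * before updating device scopes, while dmar_find_dmaru() walks
 * dmar_drhd_units with list_for_each_entry_rcu().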
57 DECLARE_RWSEM(dmar_global_lock);
58 LIST_HEAD(dmar_drhd_units);
60 struct acpi_table_header * __initdata dmar_tbl;
61 static int dmar_dev_scope_status = 1;
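/* Bitmap of sequence ids already handed out; used to build the "dmarN" names in dmar_alloc_seq_id(). */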
62 static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
64 static int alloc_iommu(struct dmar_drhd_unit *drhd);
65 static void free_iommu(struct intel_iommu *iommu);
67 extern const struct iommu_ops intel_iommu_ops;
69 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
72 * add INCLUDE_ALL at the tail, so scanning the list will find it at
75 if (drhd->include_all)
76 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
78 list_add_rcu(&drhd->list, &dmar_drhd_units);
81 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
83 struct acpi_dmar_device_scope *scope;
88 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
89 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
90 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
92 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
93 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
94 pr_warn("Unsupported device scope\n");
96 start += scope->length;
101 return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
104 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
107 struct device *tmp_dev;
109 if (*devices && *cnt) {
110 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
119 /* Optimize out kzalloc()/kfree() for normal cases */
120 static char dmar_pci_notify_info_buf[64];
122 static struct dmar_pci_notify_info *
123 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
128 struct dmar_pci_notify_info *info;
130 BUG_ON(dev->is_virtfn);
133 * Ignore devices that have a domain number higher than what can
134 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
136 if (pci_domain_nr(dev->bus) > U16_MAX)
139 /* Only generate path[] for device addition event */
140 if (event == BUS_NOTIFY_ADD_DEVICE)
141 for (tmp = dev; tmp; tmp = tmp->bus->self)
144 size = struct_size(info, path, level);
145 if (size <= sizeof(dmar_pci_notify_info_buf)) {
146 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
148 info = kzalloc(size, GFP_KERNEL);
150 pr_warn("Out of memory when allocating notify_info "
151 "for %s.\n", pci_name(dev));
152 if (dmar_dev_scope_status == 0)
153 dmar_dev_scope_status = -ENOMEM;
160 info->seg = pci_domain_nr(dev->bus);
162 if (event == BUS_NOTIFY_ADD_DEVICE) {
163 for (tmp = dev; tmp; tmp = tmp->bus->self) {
165 info->path[level].bus = tmp->bus->number;
166 info->path[level].device = PCI_SLOT(tmp->devfn);
167 info->path[level].function = PCI_FUNC(tmp->devfn);
168 if (pci_is_root_bus(tmp->bus))
169 info->bus = tmp->bus->number;
176 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
178 if ((void *)info != dmar_pci_notify_info_buf)
182 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
183 struct acpi_dmar_pci_path *path, int count)
187 if (info->bus != bus)
189 if (info->level != count)
192 for (i = 0; i < count; i++) {
193 if (path[i].device != info->path[i].device ||
194 path[i].function != info->path[i].function)
206 if (bus == info->path[i].bus &&
207 path[0].device == info->path[i].device &&
208 path[0].function == info->path[i].function) {
209 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
210 bus, path[0].device, path[0].function);
217 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
218 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
219 void *start, void *end, u16 segment,
220 struct dmar_dev_scope *devices,
224 struct device *tmp, *dev = &info->dev->dev;
225 struct acpi_dmar_device_scope *scope;
226 struct acpi_dmar_pci_path *path;
228 if (segment != info->seg)
231 for (; start < end; start += scope->length) {
233 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
234 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
237 path = (struct acpi_dmar_pci_path *)(scope + 1);
238 level = (scope->length - sizeof(*scope)) / sizeof(*path);
239 if (!dmar_match_pci_path(info, scope->bus, path, level))
243 * We expect devices with endpoint scope to have normal PCI
244 * headers, and devices with bridge scope to have bridge PCI
245 * headers. However PCI NTB devices may be listed in the
246 * DMAR table with bridge scope, even though they have a
247 * normal PCI header. NTB devices are identified by class
248 * "BRIDGE_OTHER" (0680h) - we don't declare a socpe mismatch
249 * for this special case.
251 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
252 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
253 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
254 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
255 info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
256 pr_warn("Device scope type does not match for %s\n",
257 pci_name(info->dev));
261 for_each_dev_scope(devices, devices_cnt, i, tmp)
263 devices[i].bus = info->dev->bus->number;
264 devices[i].devfn = info->dev->devfn;
265 rcu_assign_pointer(devices[i].dev,
269 BUG_ON(i >= devices_cnt);
275 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
276 struct dmar_dev_scope *devices, int count)
281 if (info->seg != segment)
284 for_each_active_dev_scope(devices, count, index, tmp)
285 if (tmp == &info->dev->dev) {
286 RCU_INIT_POINTER(devices[index].dev, NULL);
295 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
298 struct dmar_drhd_unit *dmaru;
299 struct acpi_dmar_hardware_unit *drhd;
301 for_each_drhd_unit(dmaru) {
302 if (dmaru->include_all)
305 drhd = container_of(dmaru->hdr,
306 struct acpi_dmar_hardware_unit, header);
307 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
308 ((void *)drhd) + drhd->header.length,
310 dmaru->devices, dmaru->devices_cnt);
315 ret = dmar_iommu_notify_scope_dev(info);
316 if (ret < 0 && dmar_dev_scope_status == 0)
317 dmar_dev_scope_status = ret;
320 intel_irq_remap_add_device(info);
325 static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
327 struct dmar_drhd_unit *dmaru;
329 for_each_drhd_unit(dmaru)
330 if (dmar_remove_dev_scope(info, dmaru->segment,
331 dmaru->devices, dmaru->devices_cnt))
333 dmar_iommu_notify_scope_dev(info);
336 static int dmar_pci_bus_notifier(struct notifier_block *nb,
337 unsigned long action, void *data)
339 struct pci_dev *pdev = to_pci_dev(data);
340 struct dmar_pci_notify_info *info;
342 /* Only care about add/remove events for physical functions.
343 * For VFs we actually do the lookup based on the corresponding
344 * PF in device_to_iommu() anyway. */
347 if (action != BUS_NOTIFY_ADD_DEVICE &&
348 action != BUS_NOTIFY_REMOVED_DEVICE)
351 info = dmar_alloc_pci_notify_info(pdev, action);
355 down_write(&dmar_global_lock);
356 if (action == BUS_NOTIFY_ADD_DEVICE)
357 dmar_pci_bus_add_dev(info);
358 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
359 dmar_pci_bus_del_dev(info);
360 up_write(&dmar_global_lock);
362 dmar_free_pci_notify_info(info);
367 static struct notifier_block dmar_pci_bus_nb = {
368 .notifier_call = dmar_pci_bus_notifier,
372 static struct dmar_drhd_unit *
373 dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
375 struct dmar_drhd_unit *dmaru;
377 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
379 if (dmaru->segment == drhd->segment &&
380 dmaru->reg_base_addr == drhd->address)
387 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
388 * structure which uniquely represents one DMA remapping hardware unit
389 * present in the platform
391 static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
393 struct acpi_dmar_hardware_unit *drhd;
394 struct dmar_drhd_unit *dmaru;
397 drhd = (struct acpi_dmar_hardware_unit *)header;
398 dmaru = dmar_find_dmaru(drhd);
402 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
407 * If the header is allocated from slab by the ACPI _DSM method, we need to
408 * copy the content because the memory buffer will be freed on return.
410 dmaru->hdr = (void *)(dmaru + 1);
411 memcpy(dmaru->hdr, header, header->length);
412 dmaru->reg_base_addr = drhd->address;
413 dmaru->segment = drhd->segment;
414 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
415 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
416 ((void *)drhd) + drhd->header.length,
417 &dmaru->devices_cnt);
418 if (dmaru->devices_cnt && dmaru->devices == NULL) {
423 ret = alloc_iommu(dmaru);
425 dmar_free_dev_scope(&dmaru->devices,
426 &dmaru->devices_cnt);
430 dmar_register_drhd_unit(dmaru);
439 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
441 if (dmaru->devices && dmaru->devices_cnt)
442 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
444 free_iommu(dmaru->iommu);
448 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
451 struct acpi_dmar_andd *andd = (void *)header;
453 /* Check for NUL termination within the designated length */
454 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
456 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
457 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
458 dmi_get_system_info(DMI_BIOS_VENDOR),
459 dmi_get_system_info(DMI_BIOS_VERSION),
460 dmi_get_system_info(DMI_PRODUCT_VERSION));
461 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
464 pr_info("ANDD device: %x name: %s\n", andd->device_number,
470 #ifdef CONFIG_ACPI_NUMA
471 static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
473 struct acpi_dmar_rhsa *rhsa;
474 struct dmar_drhd_unit *drhd;
476 rhsa = (struct acpi_dmar_rhsa *)header;
477 for_each_drhd_unit(drhd) {
478 if (drhd->reg_base_addr == rhsa->base_address) {
479 int node = pxm_to_node(rhsa->proximity_domain);
481 if (!node_online(node))
483 drhd->iommu->node = node;
488 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
489 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
491 dmi_get_system_info(DMI_BIOS_VENDOR),
492 dmi_get_system_info(DMI_BIOS_VERSION),
493 dmi_get_system_info(DMI_PRODUCT_VERSION));
494 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
499 #define dmar_parse_one_rhsa dmar_res_noop
503 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
505 struct acpi_dmar_hardware_unit *drhd;
506 struct acpi_dmar_reserved_memory *rmrr;
507 struct acpi_dmar_atsr *atsr;
508 struct acpi_dmar_rhsa *rhsa;
510 switch (header->type) {
511 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
512 drhd = container_of(header, struct acpi_dmar_hardware_unit,
514 pr_info("DRHD base: %#016Lx flags: %#x\n",
515 (unsigned long long)drhd->address, drhd->flags);
517 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
518 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
520 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
521 (unsigned long long)rmrr->base_address,
522 (unsigned long long)rmrr->end_address);
524 case ACPI_DMAR_TYPE_ROOT_ATS:
525 atsr = container_of(header, struct acpi_dmar_atsr, header);
526 pr_info("ATSR flags: %#x\n", atsr->flags);
528 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
529 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
530 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
531 (unsigned long long)rhsa->base_address,
532 rhsa->proximity_domain);
534 case ACPI_DMAR_TYPE_NAMESPACE:
535 /* We don't print this here because we need to sanity-check
536 it first. So print it in dmar_parse_one_andd() instead. */
542 * dmar_table_detect - checks to see if the platform supports DMAR devices
544 static int __init dmar_table_detect(void)
546 acpi_status status = AE_OK;
548 /* if we can find the DMAR table, then there are DMAR devices */
549 status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
551 if (ACPI_SUCCESS(status) && !dmar_tbl) {
552 pr_warn("Unable to map DMAR\n");
553 status = AE_NOT_FOUND;
556 return ACPI_SUCCESS(status) ? 0 : -ENOENT;
559 static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
560 size_t len, struct dmar_res_callback *cb)
562 struct acpi_dmar_header *iter, *next;
563 struct acpi_dmar_header *end = ((void *)start) + len;
565 for (iter = start; iter < end; iter = next) {
566 next = (void *)iter + iter->length;
567 if (iter->length == 0) {
568 /* Avoid looping forever on bad ACPI tables */
569 pr_debug(FW_BUG "Invalid 0-length structure\n");
571 } else if (next > end) {
572 /* Avoid passing table end */
573 pr_warn(FW_BUG "Record passes table end\n");
578 dmar_table_print_dmar_entry(iter);
580 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
581 /* continue for forward compatibility */
582 pr_debug("Unknown DMAR structure type %d\n",
584 } else if (cb->cb[iter->type]) {
587 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
590 } else if (!cb->ignore_unhandled) {
591 pr_warn("No handler for DMAR structure type %d\n",
600 static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
601 struct dmar_res_callback *cb)
603 return dmar_walk_remapping_entries((void *)(dmar + 1),
604 dmar->header.length - sizeof(*dmar), cb);
608 * parse_dmar_table - parses the DMA reporting table
611 parse_dmar_table(void)
613 struct acpi_table_dmar *dmar;
616 struct dmar_res_callback cb = {
618 .ignore_unhandled = true,
619 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
620 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
621 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
622 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
623 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
624 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
628 * Do it again, earlier dmar_tbl mapping could be mapped with
634 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
635 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
637 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
639 dmar = (struct acpi_table_dmar *)dmar_tbl;
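/*
 * The DMAR table reports the host address width (haw) as one less than
 * the actual width, hence the "+ 1" when printing it and the "- 1" in
 * the sanity check below.
 */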
643 if (dmar->width < PAGE_SHIFT - 1) {
644 pr_warn("Invalid DMAR haw\n");
648 pr_info("Host address width %d\n", dmar->width + 1);
649 ret = dmar_walk_dmar_table(dmar, &cb);
650 if (ret == 0 && drhd_count == 0)
651 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
656 static int dmar_pci_device_match(struct dmar_dev_scope devices[],
657 int cnt, struct pci_dev *dev)
663 for_each_active_dev_scope(devices, cnt, index, tmp)
664 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
667 /* Check our parent */
668 dev = dev->bus->self;
674 struct dmar_drhd_unit *
675 dmar_find_matched_drhd_unit(struct pci_dev *dev)
677 struct dmar_drhd_unit *dmaru;
678 struct acpi_dmar_hardware_unit *drhd;
680 dev = pci_physfn(dev);
683 for_each_drhd_unit(dmaru) {
684 drhd = container_of(dmaru->hdr,
685 struct acpi_dmar_hardware_unit,
688 if (dmaru->include_all &&
689 drhd->segment == pci_domain_nr(dev->bus))
692 if (dmar_pci_device_match(dmaru->devices,
693 dmaru->devices_cnt, dev))
703 static void __init dmar_acpi_insert_dev_scope(u8 device_number,
704 struct acpi_device *adev)
706 struct dmar_drhd_unit *dmaru;
707 struct acpi_dmar_hardware_unit *drhd;
708 struct acpi_dmar_device_scope *scope;
711 struct acpi_dmar_pci_path *path;
713 for_each_drhd_unit(dmaru) {
714 drhd = container_of(dmaru->hdr,
715 struct acpi_dmar_hardware_unit,
718 for (scope = (void *)(drhd + 1);
719 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
720 scope = ((void *)scope) + scope->length) {
721 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
723 if (scope->enumeration_id != device_number)
726 path = (void *)(scope + 1);
727 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
728 dev_name(&adev->dev), dmaru->reg_base_addr,
729 scope->bus, path->device, path->function);
730 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
732 dmaru->devices[i].bus = scope->bus;
733 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
735 rcu_assign_pointer(dmaru->devices[i].dev,
736 get_device(&adev->dev));
739 BUG_ON(i >= dmaru->devices_cnt);
742 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
743 device_number, dev_name(&adev->dev));
746 static int __init dmar_acpi_dev_scope_init(void)
748 struct acpi_dmar_andd *andd;
750 if (dmar_tbl == NULL)
753 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
754 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
755 andd = ((void *)andd) + andd->header.length) {
756 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
758 struct acpi_device *adev;
760 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
763 pr_err("Failed to find handle for ACPI object %s\n",
767 if (acpi_bus_get_device(h, &adev)) {
768 pr_err("Failed to get device for ACPI object %s\n",
772 dmar_acpi_insert_dev_scope(andd->device_number, adev);
778 int __init dmar_dev_scope_init(void)
780 struct pci_dev *dev = NULL;
781 struct dmar_pci_notify_info *info;
783 if (dmar_dev_scope_status != 1)
784 return dmar_dev_scope_status;
786 if (list_empty(&dmar_drhd_units)) {
787 dmar_dev_scope_status = -ENODEV;
789 dmar_dev_scope_status = 0;
791 dmar_acpi_dev_scope_init();
793 for_each_pci_dev(dev) {
797 info = dmar_alloc_pci_notify_info(dev,
798 BUS_NOTIFY_ADD_DEVICE);
800 return dmar_dev_scope_status;
802 dmar_pci_bus_add_dev(info);
803 dmar_free_pci_notify_info(info);
808 return dmar_dev_scope_status;
811 void __init dmar_register_bus_notifier(void)
813 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
817 int __init dmar_table_init(void)
819 static int dmar_table_initialized;
822 if (dmar_table_initialized == 0) {
823 ret = parse_dmar_table();
826 pr_info("Parse DMAR table failure.\n");
827 } else if (list_empty(&dmar_drhd_units)) {
828 pr_info("No DMAR devices found\n");
833 dmar_table_initialized = ret;
835 dmar_table_initialized = 1;
838 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
841 static void warn_invalid_dmar(u64 addr, const char *message)
844 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
845 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
847 dmi_get_system_info(DMI_BIOS_VENDOR),
848 dmi_get_system_info(DMI_BIOS_VERSION),
849 dmi_get_system_info(DMI_PRODUCT_VERSION));
850 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
854 dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
856 struct acpi_dmar_hardware_unit *drhd;
860 drhd = (void *)entry;
861 if (!drhd->address) {
862 warn_invalid_dmar(0, "");
867 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
869 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
871 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
875 cap = dmar_readq(addr + DMAR_CAP_REG);
876 ecap = dmar_readq(addr + DMAR_ECAP_REG);
881 early_iounmap(addr, VTD_PAGE_SIZE);
883 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
884 warn_invalid_dmar(drhd->address, " returns all ones");
891 int __init detect_intel_iommu(void)
894 struct dmar_res_callback validate_drhd_cb = {
895 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
896 .ignore_unhandled = true,
899 down_write(&dmar_global_lock);
900 ret = dmar_table_detect();
902 ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
904 if (!ret && !no_iommu && !iommu_detected &&
905 (!dmar_disabled || dmar_platform_optin())) {
907 /* Make sure ACS will be enabled */
913 x86_init.iommu.iommu_init = intel_iommu_init;
914 x86_platform.iommu_shutdown = intel_iommu_shutdown;
920 acpi_put_table(dmar_tbl);
923 up_write(&dmar_global_lock);
925 return ret ? ret : 1;
928 static void unmap_iommu(struct intel_iommu *iommu)
931 release_mem_region(iommu->reg_phys, iommu->reg_size);
935 * map_iommu: map the iommu's registers
936 * @iommu: the iommu to map
937 * @phys_addr: the physical address of the base register
939 * Memory map the iommu's registers. Start w/ a single page, and
940 * possibly expand if that turns out to be insufficient.
942 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
946 iommu->reg_phys = phys_addr;
947 iommu->reg_size = VTD_PAGE_SIZE;
949 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
950 pr_err("Can't reserve memory\n");
955 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
957 pr_err("Can't map the region\n");
962 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
963 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
965 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
967 warn_invalid_dmar(phys_addr, " returns all ones");
970 iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
972 /* the registers might be more than one page */
973 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
974 cap_max_fault_reg_offset(iommu->cap));
975 map_size = VTD_PAGE_ALIGN(map_size);
976 if (map_size > iommu->reg_size) {
978 release_mem_region(iommu->reg_phys, iommu->reg_size);
979 iommu->reg_size = map_size;
980 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
982 pr_err("Can't reserve memory\n");
986 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
988 pr_err("Can't map the region\n");
999 release_mem_region(iommu->reg_phys, iommu->reg_size);
1004 static int dmar_alloc_seq_id(struct intel_iommu *iommu)
1006 iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
1007 DMAR_UNITS_SUPPORTED);
1008 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
1011 set_bit(iommu->seq_id, dmar_seq_ids);
1012 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1015 return iommu->seq_id;
1018 static void dmar_free_seq_id(struct intel_iommu *iommu)
1020 if (iommu->seq_id >= 0) {
1021 clear_bit(iommu->seq_id, dmar_seq_ids);
1026 static int alloc_iommu(struct dmar_drhd_unit *drhd)
1028 struct intel_iommu *iommu;
1034 if (!drhd->reg_base_addr) {
1035 warn_invalid_dmar(0, "");
1039 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1043 if (dmar_alloc_seq_id(iommu) < 0) {
1044 pr_err("Failed to allocate seq_id\n");
1049 err = map_iommu(iommu, drhd->reg_base_addr);
1051 pr_err("Failed to map %s\n", iommu->name);
1052 goto error_free_seq_id;
1056 if (cap_sagaw(iommu->cap) == 0) {
1057 pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
1062 if (!drhd->ignored) {
1063 agaw = iommu_calculate_agaw(iommu);
1065 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1070 if (!drhd->ignored) {
1071 msagaw = iommu_calculate_max_sagaw(iommu);
1073 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1080 iommu->msagaw = msagaw;
1081 iommu->segment = drhd->segment;
1083 iommu->node = NUMA_NO_NODE;
1085 ver = readl(iommu->reg + DMAR_VER_REG);
1086 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1088 (unsigned long long)drhd->reg_base_addr,
1089 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1090 (unsigned long long)iommu->cap,
1091 (unsigned long long)iommu->ecap);
1093 /* Reflect status in gcmd */
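/*
 * gcmd shadows the bits currently enabled in the global command register
 * (status lives in GSTS); seeding it here keeps features that firmware or
 * a previous kernel already enabled from being cleared by later writes to
 * DMAR_GCMD_REG.
 */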
1094 sts = readl(iommu->reg + DMAR_GSTS_REG);
1095 if (sts & DMA_GSTS_IRES)
1096 iommu->gcmd |= DMA_GCMD_IRE;
1097 if (sts & DMA_GSTS_TES)
1098 iommu->gcmd |= DMA_GCMD_TE;
1099 if (sts & DMA_GSTS_QIES)
1100 iommu->gcmd |= DMA_GCMD_QIE;
1102 raw_spin_lock_init(&iommu->register_lock);
1105 * This is only for hotplug; at boot time intel_iommu_enabled won't
1106 * be set yet. When intel_iommu_init() runs, it registers the units
1107 * present at boot time, then sets intel_iommu_enabled.
1109 if (intel_iommu_enabled && !drhd->ignored) {
1110 err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1116 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
1118 err = iommu_device_register(&iommu->iommu);
1123 drhd->iommu = iommu;
1131 dmar_free_seq_id(iommu);
1137 static void free_iommu(struct intel_iommu *iommu)
1139 if (intel_iommu_enabled && iommu->iommu.ops) {
1140 iommu_device_unregister(&iommu->iommu);
1141 iommu_device_sysfs_remove(&iommu->iommu);
1145 if (iommu->pr_irq) {
1146 free_irq(iommu->pr_irq, iommu);
1147 dmar_free_hwirq(iommu->pr_irq);
1150 free_irq(iommu->irq, iommu);
1151 dmar_free_hwirq(iommu->irq);
1156 free_page((unsigned long)iommu->qi->desc);
1157 kfree(iommu->qi->desc_status);
1164 dmar_free_seq_id(iommu);
1169 * Reclaim all the submitted descriptors which have completed their work.
1171 static inline void reclaim_free_desc(struct q_inval *qi)
1173 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1174 qi->desc_status[qi->free_tail] == QI_ABORT) {
1175 qi->desc_status[qi->free_tail] = QI_FREE;
1176 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1181 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
1185 struct q_inval *qi = iommu->qi;
1186 int shift = qi_shift(iommu);
1188 if (qi->desc_status[wait_index] == QI_ABORT)
1191 fault = readl(iommu->reg + DMAR_FSTS_REG);
1194 * If IQE happens, the head points to the descriptor associated
1195 * with the error. No new descriptors are fetched until the IQE
1198 if (fault & DMA_FSTS_IQE) {
1199 head = readl(iommu->reg + DMAR_IQH_REG);
1200 if ((head >> shift) == index) {
1201 struct qi_desc *desc = qi->desc + head;
1204 * desc->qw2 and desc->qw3 are either reserved or
1205 * used by software as private data. We won't print
1206 * out these two qw's for security considerations.
1208 pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
1209 (unsigned long long)desc->qw0,
1210 (unsigned long long)desc->qw1);
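/*
 * Overwrite the bad descriptor with the (known good) wait descriptor so
 * that the queue can make forward progress once the IQE bit is cleared
 * below and hardware refetches from the head.
 */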
1211 memcpy(desc, qi->desc + (wait_index << shift),
1213 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1219 * If ITE happens, all pending wait_desc commands are aborted.
1220 * No new descriptors are fetched until the ITE is cleared.
1222 if (fault & DMA_FSTS_ITE) {
1223 head = readl(iommu->reg + DMAR_IQH_REG);
1224 head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1226 tail = readl(iommu->reg + DMAR_IQT_REG);
1227 tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1229 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1232 if (qi->desc_status[head] == QI_IN_USE)
1233 qi->desc_status[head] = QI_ABORT;
1234 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1235 } while (head != tail);
1237 if (qi->desc_status[wait_index] == QI_ABORT)
1241 if (fault & DMA_FSTS_ICE)
1242 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1248 * Function to submit invalidation descriptors of all types to the queued
1249 * invalidation interface (QI). Multiple descriptors can be submitted at a
1250 * time; a wait descriptor is appended to each submission to ensure that
1251 * hardware has completed the invalidation before returning. Wait descriptors
1252 * can be part of the submission, but they will not be polled for completion.
1254 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1255 unsigned int count, unsigned long options)
1257 struct q_inval *qi = iommu->qi;
1258 struct qi_desc wait_desc;
1259 int wait_index, index;
1260 unsigned long flags;
1270 raw_spin_lock_irqsave(&qi->q_lock, flags);
1272 * Check if we have enough empty slots in the queue to submit;
1273 * the calculation is based on:
1274 * # of desc + 1 wait desc + 1 space between head and tail
1276 while (qi->free_cnt < count + 2) {
1277 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1279 raw_spin_lock_irqsave(&qi->q_lock, flags);
1282 index = qi->free_head;
1283 wait_index = (index + count) % QI_LENGTH;
1284 shift = qi_shift(iommu);
1286 for (i = 0; i < count; i++) {
1287 offset = ((index + i) % QI_LENGTH) << shift;
1288 memcpy(qi->desc + offset, &desc[i], 1 << shift);
1289 qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
1291 qi->desc_status[wait_index] = QI_IN_USE;
1293 wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
1294 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1295 if (options & QI_OPT_WAIT_DRAIN)
1296 wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
1297 wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
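/*
 * The wait descriptor asks hardware to write QI_DONE into the status slot
 * addressed by qw1 once the descriptors queued ahead of it have been
 * processed; the polling loop below spins on that slot.
 */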
1301 offset = wait_index << shift;
1302 memcpy(qi->desc + offset, &wait_desc, 1 << shift);
1304 qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
1305 qi->free_cnt -= count + 1;
1308 * update the HW tail register indicating the presence of
1311 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1313 while (qi->desc_status[wait_index] != QI_DONE) {
1315 * We will leave the interrupts disabled, to prevent interrupt
1316 * context from queueing another cmd while a cmd is already submitted
1317 * and waiting for completion on this CPU. This is to avoid
1318 * a deadlock where the interrupt context can wait indefinitely
1319 * for free slots in the queue.
1321 rc = qi_check_fault(iommu, index, wait_index);
1325 raw_spin_unlock(&qi->q_lock);
1327 raw_spin_lock(&qi->q_lock);
1330 for (i = 0; i < count; i++)
1331 qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
1333 reclaim_free_desc(qi);
1334 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1343 * Flush the global interrupt entry cache.
1345 void qi_global_iec(struct intel_iommu *iommu)
1347 struct qi_desc desc;
1349 desc.qw0 = QI_IEC_TYPE;
1354 /* should never fail */
1355 qi_submit_sync(iommu, &desc, 1, 0);
1358 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1361 struct qi_desc desc;
1363 desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1364 | QI_CC_GRAN(type) | QI_CC_TYPE;
1369 qi_submit_sync(iommu, &desc, 1, 0);
1372 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1373 unsigned int size_order, u64 type)
1377 struct qi_desc desc;
1380 if (cap_write_drain(iommu->cap))
1383 if (cap_read_drain(iommu->cap))
1386 desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1387 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1388 desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1389 | QI_IOTLB_AM(size_order);
1393 qi_submit_sync(iommu, &desc, 1, 0);
1396 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1397 u16 qdep, u64 addr, unsigned mask)
1399 struct qi_desc desc;
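/*
 * For a multi-page invalidation the S bit is set and the position of the
 * least significant 0 bit in the address field encodes the range, so the
 * low bits are filled with 1s up to the requested order.
 */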
1402 addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1403 desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1405 desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
1407 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1410 desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1411 QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1415 qi_submit_sync(iommu, &desc, 1, 0);
1418 /* PASID-based IOTLB invalidation */
1419 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1420 unsigned long npages, bool ih)
1422 struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
1425 * npages == -1 means a PASID-selective invalidation; otherwise, a
1426 * positive value requests Page-selective-within-PASID invalidation.
1427 * 0 is not a valid input.
1429 if (WARN_ON(!npages)) {
1430 pr_err("Invalid input npages = %ld\n", npages);
1435 desc.qw0 = QI_EIOTLB_PASID(pasid) |
1436 QI_EIOTLB_DID(did) |
1437 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
1441 int mask = ilog2(__roundup_pow_of_two(npages));
1442 unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
1444 if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
1445 addr &= ~(align - 1);
1447 desc.qw0 = QI_EIOTLB_PASID(pasid) |
1448 QI_EIOTLB_DID(did) |
1449 QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
1451 desc.qw1 = QI_EIOTLB_ADDR(addr) |
1456 qi_submit_sync(iommu, &desc, 1, 0);
1459 /* PASID-based device IOTLB Invalidate */
1460 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1461 u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
1463 unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
1464 struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1466 desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
1467 QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
1468 QI_DEV_IOTLB_PFSID(pfsid);
1471 * If S bit is 0, we only flush a single page. If S bit is set,
1472 * the least significant zero bit indicates the invalidation address
1473 * range. VT-d spec 6.5.2.6.
1474 * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
1475 * size_order = 0 means PAGE_SIZE (4KB).
1476 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
1479 if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
1480 pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
1483 /* Take page address */
1484 desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
1488 * Existing 0s in the address below size_order may be the least
1489 * significant bit; we must set them to 1s to avoid having a
1490 * smaller size than desired.
1492 desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
1494 /* Clear size_order bit to indicate size */
1496 /* Set the S bit to indicate flushing more than 1 page */
1497 desc.qw1 |= QI_DEV_EIOTLB_SIZE;
1500 qi_submit_sync(iommu, &desc, 1, 0);
1503 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
1504 u64 granu, u32 pasid)
1506 struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1508 desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
1509 QI_PC_GRAN(granu) | QI_PC_TYPE;
1510 qi_submit_sync(iommu, &desc, 1, 0);
1514 * Disable Queued Invalidation interface.
1516 void dmar_disable_qi(struct intel_iommu *iommu)
1518 unsigned long flags;
1520 cycles_t start_time = get_cycles();
1522 if (!ecap_qis(iommu->ecap))
1525 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1527 sts = readl(iommu->reg + DMAR_GSTS_REG);
1528 if (!(sts & DMA_GSTS_QIES))
1532 * Give the HW a chance to complete the pending invalidation requests.
1534 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1535 readl(iommu->reg + DMAR_IQH_REG)) &&
1536 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1539 iommu->gcmd &= ~DMA_GCMD_QIE;
1540 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1542 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1543 !(sts & DMA_GSTS_QIES), sts);
1545 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1549 * Enable queued invalidation.
1551 static void __dmar_enable_qi(struct intel_iommu *iommu)
1554 unsigned long flags;
1555 struct q_inval *qi = iommu->qi;
1556 u64 val = virt_to_phys(qi->desc);
1558 qi->free_head = qi->free_tail = 0;
1559 qi->free_cnt = QI_LENGTH;
1562 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
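 * is present; bit 11 of IQA_REG is the descriptor width (DW) bit and
 * bits 2:0 are the queue size (QS), so this selects 256-bit descriptors
 * in a two-page (QI_LENGTH = 256 entry) queue.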
1565 if (ecap_smts(iommu->ecap))
1566 val |= (1 << 11) | 1;
1568 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1570 /* write zero to the tail reg */
1571 writel(0, iommu->reg + DMAR_IQT_REG);
1573 dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1575 iommu->gcmd |= DMA_GCMD_QIE;
1576 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1578 /* Make sure hardware completes it */
1579 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1581 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1585 * Enable Queued Invalidation interface. This is a must to support
1586 * interrupt-remapping. Also used by DMA-remapping, which replaces
1587 * register based IOTLB invalidation.
1589 int dmar_enable_qi(struct intel_iommu *iommu)
1592 struct page *desc_page;
1594 if (!ecap_qis(iommu->ecap))
1598 * queued invalidation is already set up and enabled.
1603 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1610 * Need two pages to accommodate 256 descriptors of 256 bits each
1611 * if the remapping hardware supports scalable mode translation.
1613 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
1614 !!ecap_smts(iommu->ecap));
1621 qi->desc = page_address(desc_page);
1623 qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
1624 if (!qi->desc_status) {
1625 free_page((unsigned long) qi->desc);
1631 raw_spin_lock_init(&qi->q_lock);
1633 __dmar_enable_qi(iommu);
1638 /* iommu interrupt handling. Most stuff is MSI-like. */
1646 static const char *dma_remap_fault_reasons[] =
1649 "Present bit in root entry is clear",
1650 "Present bit in context entry is clear",
1651 "Invalid context entry",
1652 "Access beyond MGAW",
1653 "PTE Write access is not set",
1654 "PTE Read access is not set",
1655 "Next page table ptr is invalid",
1656 "Root table address invalid",
1657 "Context table ptr is invalid",
1658 "non-zero reserved fields in RTP",
1659 "non-zero reserved fields in CTP",
1660 "non-zero reserved fields in PTE",
1661 "PCE for translation request specifies blocking",
1664 static const char * const dma_remap_sm_fault_reasons[] = {
1665 "SM: Invalid Root Table Address",
1666 "SM: TTM 0 for request with PASID",
1667 "SM: TTM 0 for page group request",
1668 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
1669 "SM: Error attempting to access Root Entry",
1670 "SM: Present bit in Root Entry is clear",
1671 "SM: Non-zero reserved field set in Root Entry",
1672 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
1673 "SM: Error attempting to access Context Entry",
1674 "SM: Present bit in Context Entry is clear",
1675 "SM: Non-zero reserved field set in the Context Entry",
1676 "SM: Invalid Context Entry",
1677 "SM: DTE field in Context Entry is clear",
1678 "SM: PASID Enable field in Context Entry is clear",
1679 "SM: PASID is larger than the max in Context Entry",
1680 "SM: PRE field in Context-Entry is clear",
1681 "SM: RID_PASID field error in Context-Entry",
1682 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
1683 "SM: Error attempting to access the PASID Directory Entry",
1684 "SM: Present bit in Directory Entry is clear",
1685 "SM: Non-zero reserved field set in PASID Directory Entry",
1686 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
1687 "SM: Error attempting to access PASID Table Entry",
1688 "SM: Present bit in PASID Table Entry is clear",
1689 "SM: Non-zero reserved field set in PASID Table Entry",
1690 "SM: Invalid Scalable-Mode PASID Table Entry",
1691 "SM: ERE field is clear in PASID Table Entry",
1692 "SM: SRE field is clear in PASID Table Entry",
1693 "Unknown", "Unknown",/* 0x5E-0x5F */
1694 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
1695 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
1696 "SM: Error attempting to access first-level paging entry",
1697 "SM: Present bit in first-level paging entry is clear",
1698 "SM: Non-zero reserved field set in first-level paging entry",
1699 "SM: Error attempting to access FL-PML4 entry",
1700 "SM: First-level entry address beyond MGAW in Nested translation",
1701 "SM: Read permission error in FL-PML4 entry in Nested translation",
1702 "SM: Read permission error in first-level paging entry in Nested translation",
1703 "SM: Write permission error in first-level paging entry in Nested translation",
1704 "SM: Error attempting to access second-level paging entry",
1705 "SM: Read/Write permission error in second-level paging entry",
1706 "SM: Non-zero reserved field set in second-level paging entry",
1707 "SM: Invalid second-level page table pointer",
1708 "SM: A/D bit update needed in second-level entry when set up in no snoop",
1709 "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
1710 "SM: Address in first-level translation is not canonical",
1711 "SM: U/S set 0 for first-level translation with user privilege",
1712 "SM: No execute permission for request with PASID and ER=1",
1713 "SM: Address beyond the DMA hardware max",
1714 "SM: Second-level entry address beyond the max",
1715 "SM: No write permission for Write/AtomicOp request",
1716 "SM: No read permission for Read/AtomicOp request",
1717 "SM: Invalid address-interrupt address",
1718 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
1719 "SM: A/D bit update needed in first-level entry when set up in no snoop",
1722 static const char *irq_remap_fault_reasons[] =
1724 "Detected reserved fields in the decoded interrupt-remapped request",
1725 "Interrupt index exceeded the interrupt-remapping table size",
1726 "Present field in the IRTE entry is clear",
1727 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1728 "Detected reserved fields in the IRTE entry",
1729 "Blocked a compatibility format interrupt request",
1730 "Blocked an interrupt request due to source-id verification failure",
1733 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1735 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1736 ARRAY_SIZE(irq_remap_fault_reasons))) {
1737 *fault_type = INTR_REMAP;
1738 return irq_remap_fault_reasons[fault_reason - 0x20];
1739 } else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
1740 ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
1741 *fault_type = DMA_REMAP;
1742 return dma_remap_sm_fault_reasons[fault_reason - 0x30];
1743 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1744 *fault_type = DMA_REMAP;
1745 return dma_remap_fault_reasons[fault_reason];
1747 *fault_type = UNKNOWN;
1753 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1755 if (iommu->irq == irq)
1756 return DMAR_FECTL_REG;
1757 else if (iommu->pr_irq == irq)
1758 return DMAR_PECTL_REG;
1763 void dmar_msi_unmask(struct irq_data *data)
1765 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1766 int reg = dmar_msi_reg(iommu, data->irq);
1770 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1771 writel(0, iommu->reg + reg);
1772 /* Read a reg to force flush the posted write */
1773 readl(iommu->reg + reg);
1774 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1777 void dmar_msi_mask(struct irq_data *data)
1779 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1780 int reg = dmar_msi_reg(iommu, data->irq);
1784 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1785 writel(DMA_FECTL_IM, iommu->reg + reg);
1787 /* Read a reg to force flush the posted write */
1787 readl(iommu->reg + reg);
1788 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1791 void dmar_msi_write(int irq, struct msi_msg *msg)
1793 struct intel_iommu *iommu = irq_get_handler_data(irq);
1794 int reg = dmar_msi_reg(iommu, irq);
1797 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1798 writel(msg->data, iommu->reg + reg + 4);
1799 writel(msg->address_lo, iommu->reg + reg + 8);
1800 writel(msg->address_hi, iommu->reg + reg + 12);
1801 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1804 void dmar_msi_read(int irq, struct msi_msg *msg)
1806 struct intel_iommu *iommu = irq_get_handler_data(irq);
1807 int reg = dmar_msi_reg(iommu, irq);
1810 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1811 msg->data = readl(iommu->reg + reg + 4);
1812 msg->address_lo = readl(iommu->reg + reg + 8);
1813 msg->address_hi = readl(iommu->reg + reg + 12);
1814 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1817 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1818 u8 fault_reason, u32 pasid, u16 source_id,
1819 unsigned long long addr)
1824 reason = dmar_get_fault_reason(fault_reason, &fault_type);
1826 if (fault_type == INTR_REMAP)
1827 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
1828 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1829 PCI_FUNC(source_id & 0xFF), addr >> 48,
1830 fault_reason, reason);
1832 pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n",
1833 type ? "DMA Read" : "DMA Write",
1834 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1835 PCI_FUNC(source_id & 0xFF), pasid, addr,
1836 fault_reason, reason);
1840 #define PRIMARY_FAULT_REG_LEN (16)
1841 irqreturn_t dmar_fault(int irq, void *dev_id)
1843 struct intel_iommu *iommu = dev_id;
1844 int reg, fault_index;
1847 static DEFINE_RATELIMIT_STATE(rs,
1848 DEFAULT_RATELIMIT_INTERVAL,
1849 DEFAULT_RATELIMIT_BURST);
1851 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1852 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1853 if (fault_status && __ratelimit(&rs))
1854 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1856 /* TBD: ignore advanced fault log currently */
1857 if (!(fault_status & DMA_FSTS_PPF))
1860 fault_index = dma_fsts_fault_record_index(fault_status);
1861 reg = cap_fault_reg_offset(iommu->cap);
1863 /* Disable printing, simply clear the fault when ratelimited */
1864 bool ratelimited = !__ratelimit(&rs);
1873 /* highest 32 bits */
1874 data = readl(iommu->reg + reg +
1875 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1876 if (!(data & DMA_FRCD_F))
1880 fault_reason = dma_frcd_fault_reason(data);
1881 type = dma_frcd_type(data);
1883 pasid = dma_frcd_pasid_value(data);
1884 data = readl(iommu->reg + reg +
1885 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1886 source_id = dma_frcd_source_id(data);
1888 pasid_present = dma_frcd_pasid_present(data);
1889 guest_addr = dmar_readq(iommu->reg + reg +
1890 fault_index * PRIMARY_FAULT_REG_LEN);
1891 guest_addr = dma_frcd_page_addr(guest_addr);
1894 /* clear the fault */
1895 writel(DMA_FRCD_F, iommu->reg + reg +
1896 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1898 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1901 /* Use pasid -1 if pasid is not present */
1902 dmar_fault_do_one(iommu, type, fault_reason,
1903 pasid_present ? pasid : -1,
1904 source_id, guest_addr);
1907 if (fault_index >= cap_num_fault_regs(iommu->cap))
1909 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1912 writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
1913 iommu->reg + DMAR_FSTS_REG);
1916 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1920 int dmar_set_interrupt(struct intel_iommu *iommu)
1925 * Check if the fault interrupt is already initialized.
1930 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1934 pr_err("No free IRQ vectors\n");
1938 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1940 pr_err("Can't request irq\n");
1944 int __init enable_drhd_fault_handling(void)
1946 struct dmar_drhd_unit *drhd;
1947 struct intel_iommu *iommu;
1950 * Enable fault control interrupt.
1952 for_each_iommu(iommu, drhd) {
1954 int ret = dmar_set_interrupt(iommu);
1957 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
1958 (unsigned long long)drhd->reg_base_addr, ret);
1963 * Clear any previous faults.
1965 dmar_fault(iommu->irq, iommu);
1966 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1967 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1974 * Re-enable Queued Invalidation interface.
1976 int dmar_reenable_qi(struct intel_iommu *iommu)
1978 if (!ecap_qis(iommu->ecap))
1985 * First disable queued invalidation.
1987 dmar_disable_qi(iommu);
1989 * Then enable queued invalidation again. Since there are no pending
1990 * invalidation requests now, it's safe to re-enable queued
1993 __dmar_enable_qi(iommu);
1999 * Check interrupt remapping support in DMAR table description.
2001 int __init dmar_ir_support(void)
2003 struct acpi_table_dmar *dmar;
2004 dmar = (struct acpi_table_dmar *)dmar_tbl;
2007 return dmar->flags & 0x1;
2010 /* Check whether DMAR units are in use */
2011 static inline bool dmar_in_use(void)
2013 return irq_remapping_enabled || intel_iommu_enabled;
2016 static int __init dmar_free_unused_resources(void)
2018 struct dmar_drhd_unit *dmaru, *dmaru_n;
2023 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
2024 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
2026 down_write(&dmar_global_lock);
2027 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
2028 list_del(&dmaru->list);
2029 dmar_free_drhd(dmaru);
2031 up_write(&dmar_global_lock);
2036 late_initcall(dmar_free_unused_resources);
2037 IOMMU_INIT_POST(detect_intel_iommu);
2040 * DMAR Hotplug Support
2041 * For more details, please refer to Intel(R) Virtualization Technology
2042 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
2043 * "Remapping Hardware Unit Hot Plug".
2045 static guid_t dmar_hp_guid =
2046 GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
2047 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
2050 * Currently there's only one revision and BIOS will not check the revision id,
2051 * so use 0 for safety.
2053 #define DMAR_DSM_REV_ID 0
2054 #define DMAR_DSM_FUNC_DRHD 1
2055 #define DMAR_DSM_FUNC_ATSR 2
2056 #define DMAR_DSM_FUNC_RHSA 3
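/*
 * Each _DSM function returns a buffer holding DMAR remapping structures of
 * the corresponding type (DRHD, ATSR or RHSA); dmar_walk_dsm_resource()
 * below feeds that buffer through the same dmar_res_callback handlers used
 * for the static ACPI DMAR table.
 */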
2058 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
2060 return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
2063 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
2064 dmar_res_handler_t handler, void *arg)
2067 union acpi_object *obj;
2068 struct acpi_dmar_header *start;
2069 struct dmar_res_callback callback;
2070 static int res_type[] = {
2071 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
2072 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
2073 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
2076 if (!dmar_detect_dsm(handle, func))
2079 obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
2080 func, NULL, ACPI_TYPE_BUFFER);
2084 memset(&callback, 0, sizeof(callback));
2085 callback.cb[res_type[func]] = handler;
2086 callback.arg[res_type[func]] = arg;
2087 start = (struct acpi_dmar_header *)obj->buffer.pointer;
2088 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
2095 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
2098 struct dmar_drhd_unit *dmaru;
2100 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2104 ret = dmar_ir_hotplug(dmaru, true);
2106 ret = dmar_iommu_hotplug(dmaru, true);
2111 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
2115 struct dmar_drhd_unit *dmaru;
2117 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2122 * All PCI devices managed by this unit should have been destroyed.
2124 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
2125 for_each_active_dev_scope(dmaru->devices,
2126 dmaru->devices_cnt, i, dev)
2130 ret = dmar_ir_hotplug(dmaru, false);
2132 ret = dmar_iommu_hotplug(dmaru, false);
2137 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
2139 struct dmar_drhd_unit *dmaru;
2141 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2143 list_del_rcu(&dmaru->list);
2145 dmar_free_drhd(dmaru);
2151 static int dmar_hotplug_insert(acpi_handle handle)
2156 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2157 &dmar_validate_one_drhd, (void *)1);
2161 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2162 &dmar_parse_one_drhd, (void *)&drhd_count);
2163 if (ret == 0 && drhd_count == 0) {
2164 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
2170 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
2171 &dmar_parse_one_rhsa, NULL);
2175 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2176 &dmar_parse_one_atsr, NULL);
2180 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2181 &dmar_hp_add_drhd, NULL);
2185 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2186 &dmar_hp_remove_drhd, NULL);
2188 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2189 &dmar_release_one_atsr, NULL);
2191 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2192 &dmar_hp_release_drhd, NULL);
2197 static int dmar_hotplug_remove(acpi_handle handle)
2201 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2202 &dmar_check_one_atsr, NULL);
2206 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2207 &dmar_hp_remove_drhd, NULL);
2209 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2210 &dmar_release_one_atsr, NULL));
2211 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2212 &dmar_hp_release_drhd, NULL));
2214 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2215 &dmar_hp_add_drhd, NULL);
2221 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
2222 void *context, void **retval)
2224 acpi_handle *phdl = retval;
2226 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2228 return AE_CTRL_TERMINATE;
2234 static int dmar_device_hotplug(acpi_handle handle, bool insert)
2237 acpi_handle tmp = NULL;
2243 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2246 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2248 dmar_get_dsm_handle,
2250 if (ACPI_FAILURE(status)) {
2251 pr_warn("Failed to locate _DSM method.\n");
2258 down_write(&dmar_global_lock);
2260 ret = dmar_hotplug_insert(tmp);
2262 ret = dmar_hotplug_remove(tmp);
2263 up_write(&dmar_global_lock);
2268 int dmar_device_add(acpi_handle handle)
2270 return dmar_device_hotplug(handle, true);
2273 int dmar_device_remove(acpi_handle handle)
2275 return dmar_device_hotplug(handle, false);
2279 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
2281 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
2282 * the ACPI DMAR table. This means that the platform boot firmware has made
2283 * sure no device can issue DMA outside of RMRR regions.
2285 bool dmar_platform_optin(void)
2287 struct acpi_table_dmar *dmar;
2291 status = acpi_get_table(ACPI_SIG_DMAR, 0,
2292 (struct acpi_table_header **)&dmar);
2293 if (ACPI_FAILURE(status))
2296 ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
2297 acpi_put_table((struct acpi_table_header *)dmar);
2301 EXPORT_SYMBOL_GPL(dmar_platform_optin);