/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */
#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX		0x2
#endif
struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};

static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);
/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}
/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
			struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}
/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}
/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
			struct fwnode_handle *fwnode)
{
	struct iort_fwnode *curr;
	struct acpi_iort_node *iort_node = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->fwnode == fwnode) {
			iort_node = curr->iort_node;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return iort_node;
}
typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);
/**
 * iort_register_domain_token() - register domain token and related ITS ID
 *				  to the list from where we can get it back
 *				  later on.
 * @trans_id: ITS ID.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}
/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}
/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}
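/*
 * Walk all nodes in the mapped IORT table, invoking @callback on every node
 * of the requested @type, and return the first node for which the callback
 * reports a match.
 */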
static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}
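/*
 * Match an IORT node against the device passed in @context: named component
 * nodes are matched by the device's full ACPI path name, PCI root complex
 * nodes by PCI segment number.
 */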
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}
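/*
 * Translate an input ID through a single IORT ID mapping entry: single
 * mappings ignore the input ID, range mappings reject IDs outside
 * [input_base, input_base + id_count) and add the offset to output_base.
 */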
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in >= map->input_base + map->id_count))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}
static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
				     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
		    node->type == ACPI_IORT_NODE_SMMU_V3) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}
#if (ACPI_CA_VERSION > 0x20170929)
static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/*
		 * SMMUv3 dev ID mapping index was introduced in revision 1
		 * table, not available in revision 0
		 */
		if (node->revision < 1)
			return -EINVAL;

		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
		/*
		 * ID mapping index is only ignored if all interrupts are
		 * GSIV based
		 */
		if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
		    && smmu->sync_gsiv)
			return -EINVAL;

		if (smmu->id_mapping_index >= node->mapping_count) {
			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
			       node, node->type);
			return -EINVAL;
		}

		return smmu->id_mapping_index;
	default:
		return -EINVAL;
	}
}
#else
static inline int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	return -EINVAL;
}
#endif
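/*
 * Walk the ID mapping tree starting at @node, translating @id_in at each
 * step, until a node whose type is set in @type_mask is reached. On failure
 * the input ID is propagated unchanged through @id_out and NULL is returned.
 */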
static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i, index;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/*
		 * Get the special ID mapping index (if any) and skip its
		 * associated ID map to prevent erroneous multi-stage
		 * IORT ID translations.
		 */
		index = iort_get_id_mapping_index(node);

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			/* if it is special mapping index, skip it */
			if (i == index)
				continue;

			if (!iort_id_map(map, node->type, id, &id))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}
static struct acpi_iort_node *iort_node_map_platform_id(
		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
		int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: if the parent is not of the target type we want,
	 * map the initial dev id again, for use cases such as
	 * NC (named component) -> SMMU -> ITS. If the type matches,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}
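/*
 * Retrieve the IORT node describing @dev: for platform devices look up the
 * cached fwnode association or scan for a matching named component node,
 * for PCI devices scan for the root complex node of the device's segment.
 */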
static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev)) {
		struct acpi_iort_node *node;
		/*
		 * scan iort_fwnode_list to see if it's an iort platform
		 * device (such as SMMU, PMCG); its iort node was already
		 * cached and associated with the fwnode when the iort
		 * platform devices were initialized.
		 */
		node = iort_get_iort_node(dev->fwnode);
		if (node)
			return node;

		/*
		 * if not, then it should be a platform device defined in
		 * DSDT/SSDT (with Named Component node in IORT)
		 */
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
	}

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}
/**
 * iort_msi_map_rid() - Map an MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}
/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a dev id was found, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i, index;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);
	/* if there is a valid index, go get the dev_id directly */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
	} else {
		for (i = 0; i < node->mapping_count; i++) {
			if (iort_node_map_platform_id(node, dev_id,
						      IORT_MSI_TYPE, i))
				return 0;
		}
	}

	return -ENODEV;
}
/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Device's requester ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}
/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}
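/*
 * Set the MSI domain for an IORT platform device (e.g. an SMMUv3 that
 * signals MSIs) based on the ITS group referenced by the node's special
 * ID mapping index.
 */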
static void iort_set_device_domain(struct device *dev,
				   struct acpi_iort_node *node)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *msi_parent;
	struct acpi_iort_id_mapping *map;
	struct fwnode_handle *iort_fwnode;
	struct irq_domain *domain;
	int index;

	index = iort_get_id_mapping_index(node);
	if (index < 0)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference ||
	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
		       node, node->type);
		return;
	}

	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				  map->output_reference);

	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return;

	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
	if (domain)
		dev_set_msi_domain(dev, domain);
}
/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}
void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
					void *data)
{
	u32 *rid = data;

	*rid = alias;
	return 0;
}
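/*
 * Initialize the IOMMU fwspec for @dev with the SMMU's fwnode and ops and
 * add the stream ID the device masters through.
 */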
static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}
static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_BUILTIN(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}
#ifdef CONFIG_IOMMU_API
static inline const struct iommu_ops *iort_fwspec_iommu_ops(
				struct iommu_fwspec *fwspec)
{
	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline int iort_add_device_replay(const struct iommu_ops *ops,
					 struct device *dev)
{
	int err = 0;

	if (ops->add_device && dev->bus && !dev->iommu_group)
		err = ops->add_device(dev);

	return err;
}
#else
static inline const struct iommu_ops *iort_fwspec_iommu_ops(
				struct iommu_fwspec *fwspec)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
					 struct device *dev)
{ return 0; }
#endif
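/*
 * Look up the iommu_ops registered for the SMMU that @node describes and
 * use them to initialize the device's IOMMU fwspec with @streamid.
 */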
static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, this means that either
	 * the SMMU drivers have not been probed yet or that
	 * the SMMU drivers are not built into the kernel;
	 * depending on whether the SMMU drivers are built-in,
	 * defer the IOMMU configuration or just abort it.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
		       -EPROBE_DEFER : -ENODEV;

	return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}
struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}
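/*
 * Derive the DMA addressable size for a named component from the
 * memory_address_limit field of its IORT node.
 */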
static int nc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
			1ULL << ncomp->memory_address_limit;

	return 0;
}
/**
 * iort_dma_setup() - Set-up device DMA parameters.
 *
 * @dev: device to configure
 * @dma_addr: device DMA address result pointer
 * @dma_size: DMA range size result pointer
 */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
	u64 mask, dmaaddr = 0, size = 0, offset = 0;
	int ret, msb;

	/*
	 * Set default coherent_dma_mask to 32 bit.  Drivers are expected to
	 * setup the correct supported mask.
	 */
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	/*
	 * Set it to coherent_dma_mask by default if the architecture
	 * code has not set it.
	 */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);

	if (dev_is_pci(dev))
		ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
	else
		ret = nc_dma_get_range(dev, &size);

	if (!ret) {
		msb = fls64(dmaaddr + size - 1);
		/*
		 * Round-up to the power-of-two mask or set
		 * the mask to the whole 64-bit address space
		 * in case the DMA region covers the full
		 * memory (64-bit case).
		 */
		mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
		/*
		 * Limit coherent and dma mask based on size
		 * retrieved from firmware.
		 */
		dev->coherent_dma_mask = mask;
		*dev->dma_mask = mask;
	}

	*dma_addr = dmaaddr;
	*dma_size = size;

	dev->dma_pfn_offset = PFN_DOWN(offset);
	dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}
/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
	struct acpi_iort_node *node, *parent;
	const struct iommu_ops *ops;
	u32 streamid = 0;
	int err = -ENODEV;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
	if (ops)
		return ops;

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);
	} else {
		int i = 0;

		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		do {
			parent = iort_node_map_platform_id(node, &streamid,
							   IORT_IOMMU_TYPE,
							   i++);

			if (parent)
				err = iort_iommu_xlate(dev, parent, streamid);
		} while (parent && !err);
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (!err) {
		ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
		err = iort_add_device_replay(ops, dev);
	}

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}
static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  u32 trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
								      name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}
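/*
 * Count the memory and wired interrupt resources an SMMUv3 node will need:
 * one MMIO region plus one IRQ per non-zero GSIV.
 */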
static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}
static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Cavium ThunderX2 implementation doesn't support unique irq
	 * lines. Use a single irq line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}
static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size for the Cavium ThunderX2 implementation,
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}
static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}
static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}
#if defined(CONFIG_ACPI_NUMA)
/*
 * set numa proximity domain for smmuv3 device
 */
static void __init arm_smmu_v3_set_proximity(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif
static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}
static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}
static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}
struct iort_dev_config {
	const char *name;
	int (*dev_init)(struct acpi_iort_node *node);
	bool (*dev_is_coherent)(struct acpi_iort_node *node);
	int (*dev_count_resources)(struct acpi_iort_node *node);
	void (*dev_init_resources)(struct resource *res,
				   struct acpi_iort_node *node);
	void (*dev_set_proximity)(struct device *dev,
				  struct acpi_iort_node *node);
};
static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.dev_is_coherent = arm_smmu_v3_is_coherent,
	.dev_count_resources = arm_smmu_v3_count_resources,
	.dev_init_resources = arm_smmu_v3_init_resources,
	.dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.dev_is_coherent = arm_smmu_is_coherent,
	.dev_count_resources = arm_smmu_count_resources,
	.dev_init_resources = arm_smmu_init_resources
};
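/*
 * Return the platform device configuration matching an IORT node type,
 * or NULL if the node does not describe a device that needs a platform
 * device to be created.
 */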
static __init const struct iort_dev_config *iort_get_dev_cfg(
			struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	default:
		return NULL;
	}
}
/**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
					   const struct iort_dev_config *ops)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	if (ops->dev_set_proximity)
		ops->dev_set_proximity(&pdev->dev, node);

	count = ops->dev_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->dev_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the dma masks to be equivalent for
	 * all SMMUs set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);
	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	iort_set_device_domain(&pdev->dev, node);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	acpi_dma_deconfigure(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}
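/*
 * If a PCI root complex node has an ID mapping that targets an SMMU,
 * request PCI ACS so that device isolation is enforced on the system.
 */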
static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table,  map->output_reference);
			/*
			 * If we detect a RC->SMMU mapping, make sure
			 * we enable ACS on the system.
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
				return true;
			}
		}
	}

	return false;
}
static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
	bool acs_enabled = false;
	const struct iort_dev_config *ops;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		if (!acs_enabled)
			acs_enabled = iort_enable_acs(iort_node);

		ops = iort_get_dev_cfg(iort_node);
		if (ops) {
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_platform_device(iort_node, ops);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}
void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}