// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */
8 #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
10 #include "fsl_pamu_domain.h"
12 #include <sysdev/fsl_pci.h>
/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
18 static DEFINE_SPINLOCK(iommu_lock);
20 static struct kmem_cache *fsl_pamu_domain_cache;
21 static struct kmem_cache *iommu_devinfo_cache;
22 static DEFINE_SPINLOCK(device_domain_lock);
24 struct iommu_device pamu_iommu; /* IOMMU core code handle */
26 static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
28 return container_of(dom, struct fsl_dma_domain, iommu_domain);
31 static int __init iommu_init_mempool(void)
33 fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
34 sizeof(struct fsl_dma_domain),
38 if (!fsl_pamu_domain_cache) {
39 pr_debug("Couldn't create fsl iommu_domain cache\n");
43 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
44 sizeof(struct device_domain_info),
48 if (!iommu_devinfo_cache) {
49 pr_debug("Couldn't create devinfo cache\n");
50 kmem_cache_destroy(fsl_pamu_domain_cache);
57 static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
63 spin_lock_irqsave(&iommu_lock, flags);
64 ret = pamu_update_paace_stash(liodn, val);
66 pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
68 spin_unlock_irqrestore(&iommu_lock, flags);
72 spin_unlock_irqrestore(&iommu_lock, flags);
77 /* Set the geometry parameters for a LIODN */
78 static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
81 struct iommu_domain *domain = &dma_domain->iommu_domain;
82 struct iommu_domain_geometry *geom = &domain->geometry;
83 u32 omi_index = ~(u32)0;
88 * Configure the omi_index at the geometry setup time.
89 * This is a static value which depends on the type of
90 * device and would not change thereafter.
92 get_ome_index(&omi_index, dev);
94 spin_lock_irqsave(&iommu_lock, flags);
95 ret = pamu_disable_liodn(liodn);
98 ret = pamu_config_ppaace(liodn, geom->aperture_start,
99 geom->aperture_end + 1, omi_index, 0,
100 ~(u32)0, dma_domain->stash_id, 0);
103 ret = pamu_config_ppaace(liodn, geom->aperture_start,
104 geom->aperture_end + 1, ~(u32)0,
105 0, ~(u32)0, dma_domain->stash_id,
106 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
108 spin_unlock_irqrestore(&iommu_lock, flags);
110 pr_debug("PAACE configuration failed for liodn %d\n",
116 static void remove_device_ref(struct device_domain_info *info)
120 list_del(&info->link);
121 spin_lock_irqsave(&iommu_lock, flags);
122 pamu_disable_liodn(info->liodn);
123 spin_unlock_irqrestore(&iommu_lock, flags);
124 spin_lock_irqsave(&device_domain_lock, flags);
125 dev_iommu_priv_set(info->dev, NULL);
126 kmem_cache_free(iommu_devinfo_cache, info);
127 spin_unlock_irqrestore(&device_domain_lock, flags);
130 static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
132 struct device_domain_info *info, *tmp;
135 spin_lock_irqsave(&dma_domain->domain_lock, flags);
136 /* Remove the device from the domain device list */
137 list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
138 if (!dev || (info->dev == dev))
139 remove_device_ref(info);
141 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
144 static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
146 struct device_domain_info *info, *old_domain_info;
149 spin_lock_irqsave(&device_domain_lock, flags);
151 * Check here if the device is already attached to domain or not.
152 * If the device is already attached to a domain detach it.
154 old_domain_info = dev_iommu_priv_get(dev);
155 if (old_domain_info && old_domain_info->domain != dma_domain) {
156 spin_unlock_irqrestore(&device_domain_lock, flags);
157 detach_device(dev, old_domain_info->domain);
158 spin_lock_irqsave(&device_domain_lock, flags);
161 info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
165 info->domain = dma_domain;
167 list_add(&info->link, &dma_domain->devices);
169 * In case of devices with multiple LIODNs just store
170 * the info for the first LIODN as all
171 * LIODNs share the same domain
173 if (!dev_iommu_priv_get(dev))
174 dev_iommu_priv_set(dev, info);
175 spin_unlock_irqrestore(&device_domain_lock, flags);
178 static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
181 if (iova < domain->geometry.aperture_start ||
182 iova > domain->geometry.aperture_end)
187 static bool fsl_pamu_capable(enum iommu_cap cap)
189 return cap == IOMMU_CAP_CACHE_COHERENCY;
192 static void fsl_pamu_domain_free(struct iommu_domain *domain)
194 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
196 /* remove all the devices from the device list */
197 detach_device(NULL, dma_domain);
198 kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
201 static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
203 struct fsl_dma_domain *dma_domain;
205 if (type != IOMMU_DOMAIN_UNMANAGED)
208 dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
212 dma_domain->stash_id = ~(u32)0;
213 INIT_LIST_HEAD(&dma_domain->devices);
214 spin_lock_init(&dma_domain->domain_lock);
216 /* default geometry 64 GB i.e. maximum system address */
217 dma_domain->iommu_domain. geometry.aperture_start = 0;
218 dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
219 dma_domain->iommu_domain.geometry.force_aperture = true;
221 return &dma_domain->iommu_domain;
224 /* Update stash destination for all LIODNs associated with the domain */
225 static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
227 struct device_domain_info *info;
230 list_for_each_entry(info, &dma_domain->devices, link) {
231 ret = update_liodn_stash(info->liodn, dma_domain, val);
239 static int fsl_pamu_attach_device(struct iommu_domain *domain,
242 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
246 struct pci_dev *pdev = NULL;
247 struct pci_controller *pci_ctl;
250 * Use LIODN of the PCI controller while attaching a
253 if (dev_is_pci(dev)) {
254 pdev = to_pci_dev(dev);
255 pci_ctl = pci_bus_to_host(pdev->bus);
257 * make dev point to pci controller device
258 * so we can get the LIODN programmed by
261 dev = pci_ctl->parent;
264 liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
266 pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
270 spin_lock_irqsave(&dma_domain->domain_lock, flags);
271 for (i = 0; i < len / sizeof(u32); i++) {
272 /* Ensure that LIODN value is valid */
273 if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
274 pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
275 liodn[i], dev->of_node);
280 attach_device(dma_domain, liodn[i], dev);
281 ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
284 ret = pamu_enable_liodn(liodn[i]);
288 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
292 static void fsl_pamu_detach_device(struct iommu_domain *domain,
295 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
298 struct pci_dev *pdev = NULL;
299 struct pci_controller *pci_ctl;
302 * Use LIODN of the PCI controller while detaching a
305 if (dev_is_pci(dev)) {
306 pdev = to_pci_dev(dev);
307 pci_ctl = pci_bus_to_host(pdev->bus);
309 * make dev point to pci controller device
310 * so we can get the LIODN programmed by
313 dev = pci_ctl->parent;
316 prop = of_get_property(dev->of_node, "fsl,liodn", &len);
318 detach_device(dev, dma_domain);
320 pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
323 /* Set the domain stash attribute */
324 int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
326 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
330 spin_lock_irqsave(&dma_domain->domain_lock, flags);
331 dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
332 if (dma_domain->stash_id == ~(u32)0) {
333 pr_debug("Invalid stash attributes\n");
334 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
337 ret = update_domain_stash(dma_domain, dma_domain->stash_id);
338 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
/* Return the device's existing iommu group, allocating one if absent. */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}
354 static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
358 /* Check the PCI controller version number by readding BRR1 register */
359 version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
360 version &= PCI_FSL_BRR1_VER;
361 /* If PCI controller version is >= 0x204 we can partition endpoints */
362 return version >= 0x204;
365 /* Get iommu group information from peer devices or devices on the parent bus */
366 static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
369 struct iommu_group *group;
370 struct pci_bus *bus = pdev->bus;
373 * Traverese the pci bus device list to get
374 * the shared iommu group.
377 list_for_each_entry(tmp, &bus->devices, bus_list) {
380 group = iommu_group_get(&tmp->dev);
391 static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
393 struct pci_controller *pci_ctl;
394 bool pci_endpt_partitioning;
395 struct iommu_group *group = NULL;
397 pci_ctl = pci_bus_to_host(pdev->bus);
398 pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
399 /* We can partition PCIe devices so assign device group to the device */
400 if (pci_endpt_partitioning) {
401 group = pci_device_group(&pdev->dev);
404 * PCIe controller is not a paritionable entity
405 * free the controller device iommu_group.
407 if (pci_ctl->parent->iommu_group)
408 iommu_group_remove_device(pci_ctl->parent);
411 * All devices connected to the controller will share the
412 * PCI controllers device group. If this is the first
413 * device to be probed for the pci controller, copy the
414 * device group information from the PCI controller device
415 * node and remove the PCI controller iommu group.
416 * For subsequent devices, the iommu group information can
417 * be obtained from sibling devices (i.e. from the bus_devices
420 if (pci_ctl->parent->iommu_group) {
421 group = get_device_iommu_group(pci_ctl->parent);
422 iommu_group_remove_device(pci_ctl->parent);
424 group = get_shared_pci_device_group(pdev);
429 group = ERR_PTR(-ENODEV);
434 static struct iommu_group *fsl_pamu_device_group(struct device *dev)
436 struct iommu_group *group = ERR_PTR(-ENODEV);
440 * For platform devices we allocate a separate group for
441 * each of the devices.
444 group = get_pci_device_group(to_pci_dev(dev));
445 else if (of_get_property(dev->of_node, "fsl,liodn", &len))
446 group = get_device_iommu_group(dev);
451 static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
/* Nothing to release: probe_device allocates no per-device state. */
static void fsl_pamu_release_device(struct device *dev)
{
}
460 static const struct iommu_ops fsl_pamu_ops = {
461 .capable = fsl_pamu_capable,
462 .domain_alloc = fsl_pamu_domain_alloc,
463 .domain_free = fsl_pamu_domain_free,
464 .attach_dev = fsl_pamu_attach_device,
465 .detach_dev = fsl_pamu_detach_device,
466 .iova_to_phys = fsl_pamu_iova_to_phys,
467 .probe_device = fsl_pamu_probe_device,
468 .release_device = fsl_pamu_release_device,
469 .device_group = fsl_pamu_device_group,
472 int __init pamu_domain_init(void)
476 ret = iommu_init_mempool();
480 ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
484 iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
486 ret = iommu_device_register(&pamu_iommu);
488 iommu_device_sysfs_remove(&pamu_iommu);
489 pr_err("Can't register iommu device\n");
493 bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
494 bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);