1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2013 Freescale Semiconductor, Inc.
5 * Author: Varun Sethi <varun.sethi@freescale.com>
8 #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
10 #include "fsl_pamu_domain.h"
12 #include <sysdev/fsl_pci.h>
15 * Global spinlock that needs to be held while
18 static DEFINE_SPINLOCK(iommu_lock);
20 static struct kmem_cache *fsl_pamu_domain_cache;
21 static struct kmem_cache *iommu_devinfo_cache;
22 static DEFINE_SPINLOCK(device_domain_lock);
24 struct iommu_device pamu_iommu; /* IOMMU core code handle */
/* Convert a generic iommu_domain pointer to its enclosing fsl_dma_domain. */
26 static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
28 return container_of(dom, struct fsl_dma_domain, iommu_domain);
/*
 * Create the slab caches used by this driver: one for fsl_dma_domain
 * objects and one for per-device device_domain_info records.  If the
 * second cache cannot be created, the first is destroyed again so no
 * partial state is left behind.
 */
31 static int __init iommu_init_mempool(void)
33 fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
34 sizeof(struct fsl_dma_domain),
38 if (!fsl_pamu_domain_cache) {
39 pr_debug("Couldn't create fsl iommu_domain cache\n");
43 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
44 sizeof(struct device_domain_info),
48 if (!iommu_devinfo_cache) {
49 pr_debug("Couldn't create devinfo cache\n");
	/* Roll back the first cache so init failure is all-or-nothing. */
50 kmem_cache_destroy(fsl_pamu_domain_cache);
57 /* Map the DMA window corresponding to the LIODN */
/*
 * Program the primary PAACE for @liodn so that it spans the domain's
 * aperture (aperture_start .. aperture_end inclusive, hence the "+ 1"
 * on the size) with both query and update permissions.  All PAMU table
 * updates are serialized by the global iommu_lock.
 */
58 static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
61 struct iommu_domain_geometry *geom = &dma_domain->iommu_domain.geometry;
64 spin_lock_irqsave(&iommu_lock, flags);
65 ret = pamu_config_ppaace(liodn, geom->aperture_start,
66 geom->aperture_end + 1, ~(u32)0,
67 0, dma_domain->snoop_id, dma_domain->stash_id,
68 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
69 spin_unlock_irqrestore(&iommu_lock, flags);
	/* NOTE(review): guard condition not visible here; presumably "if (ret)". */
71 pr_debug("PAACE configuration failed for liodn %d\n", liodn);
/*
 * Update the stash destination field of the PAACE for @liodn to @val.
 * The PAMU table write is done under the global iommu_lock; on failure
 * the lock is dropped on the error path before the function returns.
 */
76 static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
82 spin_lock_irqsave(&iommu_lock, flags);
83 ret = pamu_update_paace_stash(liodn, val);
85 pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
	/* Early-exit unlock on the error path above. */
87 spin_unlock_irqrestore(&iommu_lock, flags);
	/* Normal-path unlock. */
91 spin_unlock_irqrestore(&iommu_lock, flags);
96 /* Set the geometry parameters for a LIODN */
/*
 * Configure the window (address/size taken from the domain geometry) and
 * the operation mapping index for @liodn.  The LIODN is first disabled,
 * then its primary PAACE is programmed; both steps happen under the
 * global iommu_lock.
 */
97 static int pamu_set_liodn(int liodn, struct device *dev,
98 struct fsl_dma_domain *dma_domain,
99 struct iommu_domain_geometry *geom_attr)
101 phys_addr_t window_addr, window_size;
	/* ~0 means "no OMI"; get_ome_index() may override it per device type. */
102 u32 omi_index = ~(u32)0;
107 * Configure the omi_index at the geometry setup time.
108 * This is a static value which depends on the type of
109 * device and would not change thereafter.
111 get_ome_index(&omi_index, dev);
113 window_addr = geom_attr->aperture_start;
	/* aperture_end is inclusive, so the window size needs the +1. */
114 window_size = geom_attr->aperture_end + 1;
116 spin_lock_irqsave(&iommu_lock, flags);
117 ret = pamu_disable_liodn(liodn);
	/* NOTE(review): the "if (!ret)" guard between these calls is not visible. */
119 ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
120 0, dma_domain->snoop_id,
121 dma_domain->stash_id, 0);
122 spin_unlock_irqrestore(&iommu_lock, flags);
124 pr_debug("PAACE configuration failed for liodn %d\n",
/*
 * Tear down one device/domain association: unlink it from the domain's
 * device list, disable the LIODN in the PAMU (under iommu_lock), then
 * clear the device's iommu private pointer and free the info record
 * (under device_domain_lock).
 */
132 static void remove_device_ref(struct device_domain_info *info)
136 list_del(&info->link);
137 spin_lock_irqsave(&iommu_lock, flags);
138 pamu_disable_liodn(info->liodn);
139 spin_unlock_irqrestore(&iommu_lock, flags);
140 spin_lock_irqsave(&device_domain_lock, flags);
141 dev_iommu_priv_set(info->dev, NULL);
142 kmem_cache_free(iommu_devinfo_cache, info);
143 spin_unlock_irqrestore(&device_domain_lock, flags);
/*
 * Detach @dev from @dma_domain.  Passing dev == NULL removes every
 * device attached to the domain (used when the domain is freed).
 * The domain's device list is walked under domain_lock with the _safe
 * iterator because remove_device_ref() unlinks entries.
 */
146 static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
148 struct device_domain_info *info, *tmp;
151 spin_lock_irqsave(&dma_domain->domain_lock, flags);
152 /* Remove the device from the domain device list */
153 list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
154 if (!dev || (info->dev == dev))
155 remove_device_ref(info);
157 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
/*
 * Record that @dev (with LIODN @liodn) is attached to @dma_domain.
 * If the device is currently attached to a different domain it is
 * detached first; device_domain_lock must be dropped around that call
 * because detach_device() takes the old domain's lock.  The allocation
 * uses GFP_ATOMIC since it happens under a spinlock.
 */
160 static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
162 struct device_domain_info *info, *old_domain_info;
165 spin_lock_irqsave(&device_domain_lock, flags);
167 * Check here if the device is already attached to domain or not.
168 * If the device is already attached to a domain detach it.
170 old_domain_info = dev_iommu_priv_get(dev);
171 if (old_domain_info && old_domain_info->domain != dma_domain) {
172 spin_unlock_irqrestore(&device_domain_lock, flags);
173 detach_device(dev, old_domain_info->domain);
174 spin_lock_irqsave(&device_domain_lock, flags);
177 info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
181 info->domain = dma_domain;
183 list_add(&info->link, &dma_domain->devices);
185 * In case of devices with multiple LIODNs just store
186 * the info for the first LIODN as all
187 * LIODNs share the same domain
189 if (!dev_iommu_priv_get(dev))
190 dev_iommu_priv_set(dev, info);
191 spin_unlock_irqrestore(&device_domain_lock, flags);
/*
 * Translate an IOVA to a physical address.  IOVAs outside the domain
 * aperture are rejected; within the aperture the mapping is presumably
 * an identity window (success path not visible here — TODO confirm).
 */
194 static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
197 if (iova < domain->geometry.aperture_start ||
198 iova > domain->geometry.aperture_end)
/* The only IOMMU capability PAMU advertises is cache coherency. */
203 static bool fsl_pamu_capable(enum iommu_cap cap)
205 return cap == IOMMU_CAP_CACHE_COHERENCY;
/*
 * Free a PAMU domain: detach every device still on the domain list
 * (detach_device with dev == NULL removes all), mark the domain
 * disabled, and return the container to its slab cache.
 */
208 static void fsl_pamu_domain_free(struct iommu_domain *domain)
210 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
212 /* remove all the devices from the device list */
213 detach_device(NULL, dma_domain);
215 dma_domain->enabled = 0;
217 kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
/*
 * Allocate and initialize a PAMU domain.  Only IOMMU_DOMAIN_UNMANAGED
 * is supported.  Stash and snoop ids start out invalid (~0) until
 * configured via domain attributes, and the default aperture covers
 * the maximum 36-bit (64 GB) system address space.
 */
220 static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
222 struct fsl_dma_domain *dma_domain;
224 if (type != IOMMU_DOMAIN_UNMANAGED)
227 dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
231 dma_domain->stash_id = ~(u32)0;
232 dma_domain->snoop_id = ~(u32)0;
233 INIT_LIST_HEAD(&dma_domain->devices);
234 spin_lock_init(&dma_domain->domain_lock);
236 /* default geometry 64 GB i.e. maximum system address */
237 dma_domain->iommu_domain.geometry.aperture_start = 0;
238 dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
239 dma_domain->iommu_domain.geometry.force_aperture = true;
241 return &dma_domain->iommu_domain;
244 /* Update stash destination for all LIODNs associated with the domain */
/*
 * Walk every device attached to the domain and push the new stash id
 * @val into each device's PAACE via update_liodn_stash().
 */
245 static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
247 struct device_domain_info *info;
250 list_for_each_entry(info, &dma_domain->devices, link) {
251 ret = update_liodn_stash(info->liodn, dma_domain, val);
260 * Attach the LIODN to the DMA domain and configure the geometry
261 * and window mappings.
/*
 * For each of the @num LIODNs belonging to @dev: validate the LIODN
 * index against the PAACE table size, record the device on the domain
 * (attach_device), program the geometry (pamu_set_liodn) and map the
 * DMA window (map_liodn).  The whole sequence runs under the domain
 * lock.  NOTE(review): liodn[] is u32 but printed with %d below —
 * %u/%pOF would be the exact specifier; verify against the full file.
 */
263 static int handle_attach_device(struct fsl_dma_domain *dma_domain,
264 struct device *dev, const u32 *liodn,
268 struct iommu_domain *domain = &dma_domain->iommu_domain;
272 spin_lock_irqsave(&dma_domain->domain_lock, flags);
273 for (i = 0; i < num; i++) {
274 /* Ensure that LIODN value is valid */
275 if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
276 pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
277 liodn[i], dev->of_node);
282 attach_device(dma_domain, liodn[i], dev);
284 * Check if geometry has already been configured
285 * for the domain. If yes, set the geometry for
288 ret = pamu_set_liodn(liodn[i], dev, dma_domain,
294 * Create window/subwindow mapping for
297 ret = map_liodn(liodn[i], dma_domain);
301 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
/*
 * iommu_ops attach hook.  For PCI devices the LIODN lives on the PCI
 * controller's device-tree node, so @dev is redirected to the
 * controller's parent before reading the "fsl,liodn" property.  The
 * property may contain several LIODNs; all of them are attached via
 * handle_attach_device().
 */
306 static int fsl_pamu_attach_device(struct iommu_domain *domain,
309 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
313 struct pci_dev *pdev = NULL;
314 struct pci_controller *pci_ctl;
317 * Use LIODN of the PCI controller while attaching a
320 if (dev_is_pci(dev)) {
321 pdev = to_pci_dev(dev);
322 pci_ctl = pci_bus_to_host(pdev->bus);
324 * make dev point to pci controller device
325 * so we can get the LIODN programmed by
328 dev = pci_ctl->parent;
331 liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	/* len is in bytes; each LIODN entry is a u32 cell. */
333 liodn_cnt = len / sizeof(u32);
334 ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
336 pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
/*
 * iommu_ops detach hook.  Mirrors fsl_pamu_attach_device(): for PCI
 * devices, redirect @dev to the PCI controller's parent node, then
 * detach the device from the domain if the "fsl,liodn" property exists.
 */
343 static void fsl_pamu_detach_device(struct iommu_domain *domain,
346 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
349 struct pci_dev *pdev = NULL;
350 struct pci_controller *pci_ctl;
353 * Use LIODN of the PCI controller while detaching a
356 if (dev_is_pci(dev)) {
357 pdev = to_pci_dev(dev);
358 pci_ctl = pci_bus_to_host(pdev->bus);
360 * make dev point to pci controller device
361 * so we can get the LIODN programmed by
364 dev = pci_ctl->parent;
367 prop = of_get_property(dev->of_node, "fsl,liodn", &len);
369 detach_device(dev, dma_domain);
371 pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
374 /* Set the domain stash attribute */
/*
 * Apply a pamu_stash_attribute to the domain: cache the attribute,
 * resolve it to a stash id via get_stash_id() (where ~0 means the
 * attribute combination is invalid), then propagate the new stash id
 * to every attached LIODN.  Runs under the domain lock.
 */
375 static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
377 struct pamu_stash_attribute *stash_attr = data;
381 spin_lock_irqsave(&dma_domain->domain_lock, flags);
383 memcpy(&dma_domain->dma_stash, stash_attr,
384 sizeof(struct pamu_stash_attribute));
386 dma_domain->stash_id = get_stash_id(stash_attr->cache,
388 if (dma_domain->stash_id == ~(u32)0) {
389 pr_debug("Invalid stash attributes\n");
390 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
394 ret = update_domain_stash(dma_domain, dma_domain->stash_id);
396 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
401 /* Configure domain dma state i.e. enable/disable DMA */
/*
 * Enable or disable DMA for every LIODN attached to the domain by
 * calling pamu_enable_liodn()/pamu_disable_liodn() per device, and
 * record the new state in dma_domain->enabled.  Runs under the
 * domain lock.
 */
402 static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
404 struct device_domain_info *info;
408 spin_lock_irqsave(&dma_domain->domain_lock, flags);
409 dma_domain->enabled = enable;
410 list_for_each_entry(info, &dma_domain->devices, link) {
411 ret = (enable) ? pamu_enable_liodn(info->liodn) :
412 pamu_disable_liodn(info->liodn);
414 pr_debug("Unable to set dma state for liodn %d",
417 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
/*
 * iommu_ops domain_set_attr hook.  Dispatches the PAMU-specific
 * attributes: STASH configures the stash target for the domain,
 * ENABLE toggles the DMA-enabled state; anything else is rejected
 * as unsupported.
 */
422 static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
423 enum iommu_attr attr_type, void *data)
425 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
429 case DOMAIN_ATTR_FSL_PAMU_STASH:
430 ret = configure_domain_stash(dma_domain, data);
432 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
433 ret = configure_domain_dma_state(dma_domain, *(int *)data);
436 pr_debug("Unsupported attribute type\n");
/*
 * Return the device's existing iommu_group, or allocate a fresh one
 * if the device has none yet.
 */
444 static struct iommu_group *get_device_iommu_group(struct device *dev)
446 struct iommu_group *group;
448 group = iommu_group_get(dev);
450 group = iommu_group_alloc();
/*
 * Decide whether this PCI controller supports per-endpoint
 * partitioning, based on the controller's BRR1 version register.
 */
455 static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
459 /* Check the PCI controller version number by reading BRR1 register */
460 version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
461 version &= PCI_FSL_BRR1_VER;
462 /* If PCI controller version is >= 0x204 we can partition endpoints */
463 return version >= 0x204;
466 /* Get iommu group information from peer devices or devices on the parent bus */
/*
 * Look for an iommu_group already assigned to a sibling device on the
 * same PCI bus; devices behind a non-partitionable controller all
 * share one group.
 */
467 static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
470 struct iommu_group *group;
471 struct pci_bus *bus = pdev->bus;
474 * Traverse the pci bus device list to get
475 * the shared iommu group.
478 list_for_each_entry(tmp, &bus->devices, bus_list) {
481 group = iommu_group_get(&tmp->dev);
/*
 * Pick the iommu_group for a PCI device.  Controllers whose BRR1
 * version allows endpoint partitioning get the normal per-device PCI
 * grouping; otherwise every device behind the controller shares the
 * controller's group (taken from the controller's parent on first
 * probe, from sibling bus devices afterwards).
 */
492 static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
494 struct pci_controller *pci_ctl;
495 bool pci_endpt_partitioning;
496 struct iommu_group *group = NULL;
498 pci_ctl = pci_bus_to_host(pdev->bus);
499 pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
500 /* We can partition PCIe devices so assign device group to the device */
501 if (pci_endpt_partitioning) {
502 group = pci_device_group(&pdev->dev);
505 * PCIe controller is not a partitionable entity
506 * free the controller device iommu_group.
508 if (pci_ctl->parent->iommu_group)
509 iommu_group_remove_device(pci_ctl->parent);
512 * All devices connected to the controller will share the
513 * PCI controllers device group. If this is the first
514 * device to be probed for the pci controller, copy the
515 * device group information from the PCI controller device
516 * node and remove the PCI controller iommu group.
517 * For subsequent devices, the iommu group information can
518 * be obtained from sibling devices (i.e. from the bus_devices
521 if (pci_ctl->parent->iommu_group) {
522 group = get_device_iommu_group(pci_ctl->parent);
523 iommu_group_remove_device(pci_ctl->parent);
525 group = get_shared_pci_device_group(pdev);
530 group = ERR_PTR(-ENODEV);
/*
 * iommu_ops device_group hook: PCI devices are grouped through
 * get_pci_device_group(); platform devices carrying an "fsl,liodn"
 * property each get their own group.  Anything else stays -ENODEV.
 */
535 static struct iommu_group *fsl_pamu_device_group(struct device *dev)
537 struct iommu_group *group = ERR_PTR(-ENODEV);
541 * For platform devices we allocate a separate group for
542 * each of the devices.
545 group = get_pci_device_group(to_pci_dev(dev));
546 else if (of_get_property(dev->of_node, "fsl,liodn", &len))
547 group = get_device_iommu_group(dev);
/* iommu_ops probe_device hook (body not visible in this view). */
552 static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
/* iommu_ops release_device hook (body not visible in this view). */
557 static void fsl_pamu_release_device(struct device *dev)
/* iommu_ops table wiring the PAMU implementation into the IOMMU core. */
561 static const struct iommu_ops fsl_pamu_ops = {
562 .capable = fsl_pamu_capable,
563 .domain_alloc = fsl_pamu_domain_alloc,
564 .domain_free = fsl_pamu_domain_free,
565 .attach_dev = fsl_pamu_attach_device,
566 .detach_dev = fsl_pamu_detach_device,
567 .iova_to_phys = fsl_pamu_iova_to_phys,
568 .domain_set_attr = fsl_pamu_set_domain_attr,
569 .probe_device = fsl_pamu_probe_device,
570 .release_device = fsl_pamu_release_device,
571 .device_group = fsl_pamu_device_group,
/*
 * Driver init: create the slab caches, register the IOMMU device with
 * sysfs and the IOMMU core, then install fsl_pamu_ops on the platform
 * and PCI buses.  The function continues past the end of this view.
 */
574 int __init pamu_domain_init(void)
578 ret = iommu_init_mempool();
582 ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
586 iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
588 ret = iommu_device_register(&pamu_iommu);
590 iommu_device_sysfs_remove(&pamu_iommu);
591 pr_err("Can't register iommu device\n");
595 bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
596 bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);