iommu/fsl_pamu: remove the snoop_id field
[linux-2.6-microblaze.git] / drivers / iommu / fsl_pamu_domain.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Copyright (C) 2013 Freescale Semiconductor, Inc.
5  * Author: Varun Sethi <varun.sethi@freescale.com>
6  */
7
8 #define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
9
10 #include "fsl_pamu_domain.h"
11
12 #include <sysdev/fsl_pci.h>
13
14 /*
15  * Global spinlock that needs to be held while
16  * configuring PAMU.
17  */
18 static DEFINE_SPINLOCK(iommu_lock);
19
20 static struct kmem_cache *fsl_pamu_domain_cache;
21 static struct kmem_cache *iommu_devinfo_cache;
22 static DEFINE_SPINLOCK(device_domain_lock);
23
24 struct iommu_device pamu_iommu; /* IOMMU core code handle */
25
26 static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
27 {
28         return container_of(dom, struct fsl_dma_domain, iommu_domain);
29 }
30
31 static int __init iommu_init_mempool(void)
32 {
33         fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
34                                                   sizeof(struct fsl_dma_domain),
35                                                   0,
36                                                   SLAB_HWCACHE_ALIGN,
37                                                   NULL);
38         if (!fsl_pamu_domain_cache) {
39                 pr_debug("Couldn't create fsl iommu_domain cache\n");
40                 return -ENOMEM;
41         }
42
43         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
44                                                 sizeof(struct device_domain_info),
45                                                 0,
46                                                 SLAB_HWCACHE_ALIGN,
47                                                 NULL);
48         if (!iommu_devinfo_cache) {
49                 pr_debug("Couldn't create devinfo cache\n");
50                 kmem_cache_destroy(fsl_pamu_domain_cache);
51                 return -ENOMEM;
52         }
53
54         return 0;
55 }
56
57 static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
58                               u32 val)
59 {
60         int ret = 0, i;
61         unsigned long flags;
62
63         spin_lock_irqsave(&iommu_lock, flags);
64         ret = pamu_update_paace_stash(liodn, val);
65         if (ret) {
66                 pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
67                          i, liodn);
68                 spin_unlock_irqrestore(&iommu_lock, flags);
69                 return ret;
70         }
71
72         spin_unlock_irqrestore(&iommu_lock, flags);
73
74         return ret;
75 }
76
/*
 * Set the geometry parameters for a LIODN.
 *
 * Programs the primary PAACE for @liodn with the domain's aperture,
 * operation-mapping index and stash id, under iommu_lock.  The LIODN
 * is disabled first; the caller re-enables it afterwards (see
 * fsl_pamu_attach_device()).
 *
 * Returns 0 on success or a negative error from the pamu_* helpers.
 */
static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	struct iommu_domain *domain = &dma_domain->iommu_domain;
	struct iommu_domain_geometry *geom = &domain->geometry;
	u32 omi_index = ~(u32)0;	/* default; may be set by get_ome_index() */
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
	/* First call programs the aperture with no access permissions. */
	ret = pamu_config_ppaace(liodn, geom->aperture_start,
				 geom->aperture_end + 1, omi_index, 0,
				 ~(u32)0, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	/* Second call sets QUERY and UPDATE access permission bits. */
	ret = pamu_config_ppaace(liodn, geom->aperture_start,
				 geom->aperture_end + 1, ~(u32)0,
				 0, ~(u32)0, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d\n",
			 liodn);
	}
	return ret;
}
115
/*
 * Drop one device<->domain link: unlink @info from the domain's device
 * list, disable its LIODN in the PAMU, clear the per-device private
 * pointer and free the bookkeeping struct.
 *
 * Called with the owning domain's domain_lock held (see detach_device()).
 */
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	/* Stop translations for this LIODN before freeing its info. */
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
129
130 static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
131 {
132         struct device_domain_info *info, *tmp;
133         unsigned long flags;
134
135         spin_lock_irqsave(&dma_domain->domain_lock, flags);
136         /* Remove the device from the domain device list */
137         list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
138                 if (!dev || (info->dev == dev))
139                         remove_device_ref(info);
140         }
141         spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
142 }
143
144 static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
145 {
146         struct device_domain_info *info, *old_domain_info;
147         unsigned long flags;
148
149         spin_lock_irqsave(&device_domain_lock, flags);
150         /*
151          * Check here if the device is already attached to domain or not.
152          * If the device is already attached to a domain detach it.
153          */
154         old_domain_info = dev_iommu_priv_get(dev);
155         if (old_domain_info && old_domain_info->domain != dma_domain) {
156                 spin_unlock_irqrestore(&device_domain_lock, flags);
157                 detach_device(dev, old_domain_info->domain);
158                 spin_lock_irqsave(&device_domain_lock, flags);
159         }
160
161         info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
162
163         info->dev = dev;
164         info->liodn = liodn;
165         info->domain = dma_domain;
166
167         list_add(&info->link, &dma_domain->devices);
168         /*
169          * In case of devices with multiple LIODNs just store
170          * the info for the first LIODN as all
171          * LIODNs share the same domain
172          */
173         if (!dev_iommu_priv_get(dev))
174                 dev_iommu_priv_set(dev, info);
175         spin_unlock_irqrestore(&device_domain_lock, flags);
176 }
177
178 static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
179                                          dma_addr_t iova)
180 {
181         if (iova < domain->geometry.aperture_start ||
182             iova > domain->geometry.aperture_end)
183                 return 0;
184         return iova;
185 }
186
187 static bool fsl_pamu_capable(enum iommu_cap cap)
188 {
189         return cap == IOMMU_CAP_CACHE_COHERENCY;
190 }
191
192 static void fsl_pamu_domain_free(struct iommu_domain *domain)
193 {
194         struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
195
196         /* remove all the devices from the device list */
197         detach_device(NULL, dma_domain);
198         kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
199 }
200
201 static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
202 {
203         struct fsl_dma_domain *dma_domain;
204
205         if (type != IOMMU_DOMAIN_UNMANAGED)
206                 return NULL;
207
208         dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
209         if (!dma_domain)
210                 return NULL;
211
212         dma_domain->stash_id = ~(u32)0;
213         INIT_LIST_HEAD(&dma_domain->devices);
214         spin_lock_init(&dma_domain->domain_lock);
215
216         /* default geometry 64 GB i.e. maximum system address */
217         dma_domain->iommu_domain. geometry.aperture_start = 0;
218         dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
219         dma_domain->iommu_domain.geometry.force_aperture = true;
220
221         return &dma_domain->iommu_domain;
222 }
223
224 /* Update stash destination for all LIODNs associated with the domain */
225 static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
226 {
227         struct device_domain_info *info;
228         int ret = 0;
229
230         list_for_each_entry(info, &dma_domain->devices, link) {
231                 ret = update_liodn_stash(info->liodn, dma_domain, val);
232                 if (ret)
233                         break;
234         }
235
236         return ret;
237 }
238
/*
 * Attach @dev to @domain: look up the device's "fsl,liodn" property
 * (taken from the PCI controller node for PCI devices), then record,
 * program and enable each listed LIODN for the domain.
 *
 * Returns 0 on success, -EINVAL for a missing property or an
 * out-of-range LIODN, or the first error from the pamu_* helpers.
 */
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* The property is an array of u32 cells; one LIODN per cell. */
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}
291
292 static void fsl_pamu_detach_device(struct iommu_domain *domain,
293                                    struct device *dev)
294 {
295         struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
296         const u32 *prop;
297         int len;
298         struct pci_dev *pdev = NULL;
299         struct pci_controller *pci_ctl;
300
301         /*
302          * Use LIODN of the PCI controller while detaching a
303          * PCI device.
304          */
305         if (dev_is_pci(dev)) {
306                 pdev = to_pci_dev(dev);
307                 pci_ctl = pci_bus_to_host(pdev->bus);
308                 /*
309                  * make dev point to pci controller device
310                  * so we can get the LIODN programmed by
311                  * u-boot.
312                  */
313                 dev = pci_ctl->parent;
314         }
315
316         prop = of_get_property(dev->of_node, "fsl,liodn", &len);
317         if (prop)
318                 detach_device(dev, dma_domain);
319         else
320                 pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
321 }
322
323 /* Set the domain stash attribute */
324 int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
325 {
326         struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
327         unsigned long flags;
328         int ret;
329
330         spin_lock_irqsave(&dma_domain->domain_lock, flags);
331         dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
332         if (dma_domain->stash_id == ~(u32)0) {
333                 pr_debug("Invalid stash attributes\n");
334                 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
335                 return -EINVAL;
336         }
337         ret = update_domain_stash(dma_domain, dma_domain->stash_id);
338         spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
339
340         return ret;
341 }
342
/* Return the device's existing iommu group, or allocate a fresh one. */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	return group ? group : iommu_group_alloc();
}
353
354 static  bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
355 {
356         u32 version;
357
358         /* Check the PCI controller version number by readding BRR1 register */
359         version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
360         version &= PCI_FSL_BRR1_VER;
361         /* If PCI controller version is >= 0x204 we can partition endpoints */
362         return version >= 0x204;
363 }
364
365 /* Get iommu group information from peer devices or devices on the parent bus */
366 static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
367 {
368         struct pci_dev *tmp;
369         struct iommu_group *group;
370         struct pci_bus *bus = pdev->bus;
371
372         /*
373          * Traverese the pci bus device list to get
374          * the shared iommu group.
375          */
376         while (bus) {
377                 list_for_each_entry(tmp, &bus->devices, bus_list) {
378                         if (tmp == pdev)
379                                 continue;
380                         group = iommu_group_get(&tmp->dev);
381                         if (group)
382                                 return group;
383                 }
384
385                 bus = bus->parent;
386         }
387
388         return NULL;
389 }
390
/*
 * Resolve the iommu group for a PCI device.
 *
 * Controllers that support endpoint partitioning (BRR1 version >=
 * 0x204) get per-device groups via the generic pci_device_group();
 * otherwise every device behind the controller shares one group,
 * seeded from the controller's own group on first probe or found via
 * already-probed sibling devices afterwards.
 *
 * Returns a valid group or ERR_PTR(-ENODEV) when none can be found.
 */
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * PCIe controller is not a paritionable entity
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controllers device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}
433
434 static struct iommu_group *fsl_pamu_device_group(struct device *dev)
435 {
436         struct iommu_group *group = ERR_PTR(-ENODEV);
437         int len;
438
439         /*
440          * For platform devices we allocate a separate group for
441          * each of the devices.
442          */
443         if (dev_is_pci(dev))
444                 group = get_pci_device_group(to_pci_dev(dev));
445         else if (of_get_property(dev->of_node, "fsl,liodn", &len))
446                 group = get_device_iommu_group(dev);
447
448         return group;
449 }
450
/* Every device is served by the single global PAMU iommu instance. */
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}
455
/* No per-device state to release; required callback, intentionally empty. */
static void fsl_pamu_release_device(struct device *dev)
{
}
459
/* Callbacks the PAMU driver exposes to the IOMMU core. */
static const struct iommu_ops fsl_pamu_ops = {
	.capable        = fsl_pamu_capable,
	.domain_alloc   = fsl_pamu_domain_alloc,
	.domain_free    = fsl_pamu_domain_free,
	.attach_dev     = fsl_pamu_attach_device,
	.detach_dev     = fsl_pamu_detach_device,
	.iova_to_phys   = fsl_pamu_iova_to_phys,
	.probe_device   = fsl_pamu_probe_device,
	.release_device = fsl_pamu_release_device,
	.device_group   = fsl_pamu_device_group,
};
471
472 int __init pamu_domain_init(void)
473 {
474         int ret = 0;
475
476         ret = iommu_init_mempool();
477         if (ret)
478                 return ret;
479
480         ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
481         if (ret)
482                 return ret;
483
484         iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
485
486         ret = iommu_device_register(&pamu_iommu);
487         if (ret) {
488                 iommu_device_sysfs_remove(&pamu_iommu);
489                 pr_err("Can't register iommu device\n");
490                 return ret;
491         }
492
493         bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
494         bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
495
496         return ret;
497 }