iommu/fsl_pamu: hardcode the window address and size in pamu_config_ppaace
drivers/iommu/fsl_pamu_domain.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */

static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}

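/* Create the kmem caches used for domain and per-device (devinfo) tracking. */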
static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

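/*
 * Update the stash destination (stash_id) in the PAACE entry for a single
 * LIODN, taking the global PAMU lock around the table update.
 */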
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_update_paace_stash(liodn, val);
	if (ret) {
		pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return ret;
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	u32 omi_index = ~(u32)0;
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d\n",
			 liodn);
	}
	return ret;
}

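/*
 * Drop a device from its domain's device list: disable the LIODN and free
 * the per-device info, clearing the device's iommu private data.
 */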
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

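/*
 * Detach one device (or, if dev is NULL, every device) from the domain by
 * walking the domain's device list.
 */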
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

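/*
 * Record a LIODN -> domain binding for a device and add it to the domain's
 * device list.
 */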
static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * If the device is already attached to a different domain,
	 * detach it from that domain first.
	 */
	old_domain_info = dev_iommu_priv_get(dev);
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * For devices with multiple LIODNs, only store the info for the
	 * first LIODN, as all LIODNs share the same domain.
	 */
	if (!dev_iommu_priv_get(dev))
		dev_iommu_priv_set(dev, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

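/*
 * The translation set up by this driver is an identity mapping, so a valid
 * IOVA within the aperture is returned unchanged as the physical address.
 */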
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;
	return iova;
}

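/* The only capability advertised is cache-coherent DMA. */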
static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

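/*
 * Only unmanaged domains are supported; the default aperture covers the
 * full 36-bit (64 GB) address space.
 */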
static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!dma_domain)
		return NULL;

	dma_domain->stash_id = ~(u32)0;
	INIT_LIST_HEAD(&dma_domain->devices);
	spin_lock_init(&dma_domain->domain_lock);

	/* default geometry 64 GB i.e. maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

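/*
 * Attach a device to a domain: look up its "fsl,liodn" property (using the
 * PCI controller node for PCI devices), then configure and enable each LIODN.
 */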
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}

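/*
 * Detach a device from a domain, again resolving PCI devices to their
 * controller node before looking up "fsl,liodn".
 */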
static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}

/* Set the domain stash attribute */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}
	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

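/* Reuse the device's existing iommu_group if it has one, else allocate a new group. */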
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the PCI bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

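/*
 * Choose the iommu_group for a PCI device: with endpoint partitioning each
 * device gets its own group, otherwise all devices on the controller share
 * one group.
 */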
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * The PCIe controller is not a partitionable entity;
		 * free the controller device's iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the PCI controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * linked list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}

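/* Every probed device is associated with the single pamu_iommu instance registered below. */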
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}

static void fsl_pamu_release_device(struct device *dev)
{
}

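/* iommu_ops wiring for the PAMU driver. */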
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.domain_free	= fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.probe_device	= fsl_pamu_probe_device,
	.release_device = fsl_pamu_release_device,
	.device_group   = fsl_pamu_device_group,
};

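/*
 * Set up the caches, register the PAMU with the IOMMU core (sysfs entry and
 * ops) and hook it up to the platform and PCI buses.
 */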
int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);

	ret = iommu_device_register(&pamu_iommu);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}