iommu: remove the unused domain_window_disable method
[linux-2.6-microblaze.git] / drivers / iommu / fsl_pamu_domain.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Copyright (C) 2013 Freescale Semiconductor, Inc.
5  * Author: Varun Sethi <varun.sethi@freescale.com>
6  */
7
8 #define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
9
10 #include "fsl_pamu_domain.h"
11
12 #include <sysdev/fsl_pci.h>
13
14 /*
15  * Global spinlock that needs to be held while
16  * configuring PAMU.
17  */
18 static DEFINE_SPINLOCK(iommu_lock);
19
20 static struct kmem_cache *fsl_pamu_domain_cache;
21 static struct kmem_cache *iommu_devinfo_cache;
22 static DEFINE_SPINLOCK(device_domain_lock);
23
24 struct iommu_device pamu_iommu; /* IOMMU core code handle */
25
/* Convert a core iommu_domain pointer to the embedding PAMU domain. */
static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}
30
31 static int __init iommu_init_mempool(void)
32 {
33         fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
34                                                   sizeof(struct fsl_dma_domain),
35                                                   0,
36                                                   SLAB_HWCACHE_ALIGN,
37                                                   NULL);
38         if (!fsl_pamu_domain_cache) {
39                 pr_debug("Couldn't create fsl iommu_domain cache\n");
40                 return -ENOMEM;
41         }
42
43         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
44                                                 sizeof(struct device_domain_info),
45                                                 0,
46                                                 SLAB_HWCACHE_ALIGN,
47                                                 NULL);
48         if (!iommu_devinfo_cache) {
49                 pr_debug("Couldn't create devinfo cache\n");
50                 kmem_cache_destroy(fsl_pamu_domain_cache);
51                 return -ENOMEM;
52         }
53
54         return 0;
55 }
56
/*
 * Translate @iova to a physical address using the domain's DMA
 * window(s).  Returns 0 when the domain has no configured geometry or
 * when the covering window is not valid.
 */
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
	u32 win_cnt = dma_domain->win_cnt;
	struct dma_window *win_ptr = &dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;

	geom = &dma_domain->iommu_domain.geometry;

	if (!win_cnt || !dma_domain->geom_size) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
		return 0;
	}

	if (win_cnt > 1) {
		u64 subwin_size;
		dma_addr_t subwin_iova;
		u32 wnd;

		/* Subwindows are equal sized: geometry size / window count. */
		subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
		/* Round iova down to its subwindow base address ... */
		subwin_iova = iova & ~(subwin_size - 1);
		/* ... and index into win_arr by offset from the aperture start. */
		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
		win_ptr = &dma_domain->win_arr[wnd];
	}

	/* Window base plus the offset of iova within the window. */
	if (win_ptr->valid)
		return win_ptr->paddr + (iova & (win_ptr->size - 1));

	return 0;
}
86
87 static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
88 {
89         struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
90         int i, ret;
91         unsigned long rpn, flags;
92
93         for (i = 0; i < dma_domain->win_cnt; i++) {
94                 if (sub_win_ptr[i].valid) {
95                         rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
96                         spin_lock_irqsave(&iommu_lock, flags);
97                         ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
98                                                  sub_win_ptr[i].size,
99                                                  ~(u32)0,
100                                                  rpn,
101                                                  dma_domain->snoop_id,
102                                                  dma_domain->stash_id,
103                                                  (i > 0) ? 1 : 0,
104                                                  sub_win_ptr[i].prot);
105                         spin_unlock_irqrestore(&iommu_lock, flags);
106                         if (ret) {
107                                 pr_debug("SPAACE configuration failed for liodn %d\n",
108                                          liodn);
109                                 return ret;
110                         }
111                 }
112         }
113
114         return ret;
115 }
116
/*
 * Program the single PAACE window of @dma_domain for @liodn.  Used when
 * the domain has exactly one DMA window (win_cnt == 1); the window
 * always starts at the aperture base.  Returns 0 or the
 * pamu_config_ppaace() error.
 */
static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
	phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
	unsigned long flags;

	/* PAMU registers are shared; serialize the update. */
	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_config_ppaace(liodn, wnd_addr,
				 wnd->size,
				 ~(u32)0,
				 wnd->paddr >> PAMU_PAGE_SHIFT,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 0, wnd->prot);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAACE configuration failed for liodn %d\n", liodn);

	return ret;
}
137
138 /* Map the DMA window corresponding to the LIODN */
139 static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
140 {
141         if (dma_domain->win_cnt > 1)
142                 return map_subwins(liodn, dma_domain);
143         else
144                 return map_win(liodn, dma_domain);
145 }
146
/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
	unsigned long flags;

	/* Hold the PAMU lock across the whole reconfiguration. */
	spin_lock_irqsave(&iommu_lock, flags);
	if (dma_domain->win_cnt > 1) {
		/* Multi-window domain: rewrite the SPAACE for window wnd_nr. */
		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id,
					 dma_domain->stash_id,
					 (wnd_nr > 0) ? 1 : 0,
					 wnd->prot);
		if (ret)
			pr_debug("Subwindow reconfiguration failed for liodn %d\n",
				 liodn);
	} else {
		/* Single-window domain: rewrite the primary PAACE. */
		phys_addr_t wnd_addr;

		wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;

		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id, dma_domain->stash_id,
					 0, wnd->prot);
		if (ret)
			pr_debug("Window reconfiguration failed for liodn %d\n",
				 liodn);
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}
187
188 static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
189                               u32 val)
190 {
191         int ret = 0, i;
192         unsigned long flags;
193
194         spin_lock_irqsave(&iommu_lock, flags);
195         if (!dma_domain->win_arr) {
196                 pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
197                          liodn);
198                 spin_unlock_irqrestore(&iommu_lock, flags);
199                 return -EINVAL;
200         }
201
202         for (i = 0; i < dma_domain->win_cnt; i++) {
203                 ret = pamu_update_paace_stash(liodn, i, val);
204                 if (ret) {
205                         pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
206                                  i, liodn);
207                         spin_unlock_irqrestore(&iommu_lock, flags);
208                         return ret;
209                 }
210         }
211
212         spin_unlock_irqrestore(&iommu_lock, flags);
213
214         return ret;
215 }
216
/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
			  struct fsl_dma_domain *dma_domain,
			  struct iommu_domain_geometry *geom_attr,
			  u32 win_cnt)
{
	phys_addr_t window_addr, window_size;
	phys_addr_t subwin_size;
	int ret = 0, i;
	u32 omi_index = ~(u32)0;
	unsigned long flags;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	window_addr = geom_attr->aperture_start;
	window_size = dma_domain->geom_size;

	/* Disable the LIODN before rewriting its primary PAACE. */
	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
					 0, dma_domain->snoop_id,
					 dma_domain->stash_id, win_cnt, 0);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
			 liodn, win_cnt);
		return ret;
	}

	if (win_cnt > 1) {
		/* Equal-sized subwindows: geometry size / window count. */
		subwin_size = window_size >> ilog2(win_cnt);
		for (i = 0; i < win_cnt; i++) {
			/* Disable, then reprogram, each subwindow SPAACE. */
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_disable_spaace(liodn, i);
			if (!ret)
				ret = pamu_config_spaace(liodn, win_cnt, i,
							 subwin_size, omi_index,
							 0, dma_domain->snoop_id,
							 dma_domain->stash_id,
							 0, 0);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}
274
275 static int check_size(u64 size, dma_addr_t iova)
276 {
277         /*
278          * Size must be a power of two and at least be equal
279          * to PAMU page size.
280          */
281         if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
282                 pr_debug("Size too small or not a power of two\n");
283                 return -EINVAL;
284         }
285
286         /* iova must be page size aligned */
287         if (iova & (size - 1)) {
288                 pr_debug("Address is not aligned with window size\n");
289                 return -EINVAL;
290         }
291
292         return 0;
293 }
294
295 static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
296 {
297         struct fsl_dma_domain *domain;
298
299         domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
300         if (!domain)
301                 return NULL;
302
303         domain->stash_id = ~(u32)0;
304         domain->snoop_id = ~(u32)0;
305         domain->win_cnt = pamu_get_max_subwin_cnt();
306         domain->geom_size = 0;
307
308         INIT_LIST_HEAD(&domain->devices);
309
310         spin_lock_init(&domain->domain_lock);
311
312         return domain;
313 }
314
/*
 * Drop one device's link to its domain: unlink it from the domain list,
 * tear down its PAMU state (subwindows first, then the LIODN itself)
 * and free the bookkeeping record.
 * NOTE(review): callers appear to hold the domain's domain_lock while
 * invoking this (see detach_device) — confirm before adding new callers.
 */
static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	/* Clear the device's back-pointer under the device-domain lock. */
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
330
/*
 * Detach @dev from @dma_domain.  Passing a NULL @dev detaches every
 * device on the domain's list (used when the domain is freed).
 */
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
344
345 static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
346 {
347         struct device_domain_info *info, *old_domain_info;
348         unsigned long flags;
349
350         spin_lock_irqsave(&device_domain_lock, flags);
351         /*
352          * Check here if the device is already attached to domain or not.
353          * If the device is already attached to a domain detach it.
354          */
355         old_domain_info = dev_iommu_priv_get(dev);
356         if (old_domain_info && old_domain_info->domain != dma_domain) {
357                 spin_unlock_irqrestore(&device_domain_lock, flags);
358                 detach_device(dev, old_domain_info->domain);
359                 spin_lock_irqsave(&device_domain_lock, flags);
360         }
361
362         info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
363
364         info->dev = dev;
365         info->liodn = liodn;
366         info->domain = dma_domain;
367
368         list_add(&info->link, &dma_domain->devices);
369         /*
370          * In case of devices with multiple LIODNs just store
371          * the info for the first LIODN as all
372          * LIODNs share the same domain
373          */
374         if (!dev_iommu_priv_get(dev))
375                 dev_iommu_priv_set(dev, info);
376         spin_unlock_irqrestore(&device_domain_lock, flags);
377 }
378
379 static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
380                                          dma_addr_t iova)
381 {
382         struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
383
384         if (iova < domain->geometry.aperture_start ||
385             iova > domain->geometry.aperture_end)
386                 return 0;
387
388         return get_phys_addr(dma_domain, iova);
389 }
390
/* The only capability PAMU advertises is DMA cache coherency. */
static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}
395
/*
 * iommu_ops->domain_free callback: detach every device, mark the
 * domain disabled/unmapped and release it back to the slab cache.
 */
static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);

	dma_domain->enabled = 0;
	dma_domain->mapped = 0;

	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
408
409 static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
410 {
411         struct fsl_dma_domain *dma_domain;
412
413         if (type != IOMMU_DOMAIN_UNMANAGED)
414                 return NULL;
415
416         dma_domain = iommu_alloc_dma_domain();
417         if (!dma_domain) {
418                 pr_debug("dma_domain allocation failed\n");
419                 return NULL;
420         }
421         /* defaul geometry 64 GB i.e. maximum system address */
422         dma_domain->iommu_domain. geometry.aperture_start = 0;
423         dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
424         dma_domain->iommu_domain.geometry.force_aperture = true;
425
426         return &dma_domain->iommu_domain;
427 }
428
429 /* Configure geometry settings for all LIODNs associated with domain */
430 static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
431                                     struct iommu_domain_geometry *geom_attr,
432                                     u32 win_cnt)
433 {
434         struct device_domain_info *info;
435         int ret = 0;
436
437         list_for_each_entry(info, &dma_domain->devices, link) {
438                 ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
439                                      geom_attr, win_cnt);
440                 if (ret)
441                         break;
442         }
443
444         return ret;
445 }
446
447 /* Update stash destination for all LIODNs associated with the domain */
448 static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
449 {
450         struct device_domain_info *info;
451         int ret = 0;
452
453         list_for_each_entry(info, &dma_domain->devices, link) {
454                 ret = update_liodn_stash(info->liodn, dma_domain, val);
455                 if (ret)
456                         break;
457         }
458
459         return ret;
460 }
461
462 /* Update domain mappings for all LIODNs associated with the domain */
463 static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
464 {
465         struct device_domain_info *info;
466         int ret = 0;
467
468         list_for_each_entry(info, &dma_domain->devices, link) {
469                 ret = update_liodn(info->liodn, dma_domain, wnd_nr);
470                 if (ret)
471                         break;
472         }
473         return ret;
474 }
475
476
477 static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
478                                   phys_addr_t paddr, u64 size, int prot)
479 {
480         struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
481         struct dma_window *wnd;
482         int pamu_prot = 0;
483         int ret;
484         unsigned long flags;
485         u64 win_size;
486
487         if (prot & IOMMU_READ)
488                 pamu_prot |= PAACE_AP_PERMS_QUERY;
489         if (prot & IOMMU_WRITE)
490                 pamu_prot |= PAACE_AP_PERMS_UPDATE;
491
492         spin_lock_irqsave(&dma_domain->domain_lock, flags);
493         if (!dma_domain->win_arr) {
494                 pr_debug("Number of windows not configured\n");
495                 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
496                 return -ENODEV;
497         }
498
499         if (wnd_nr >= dma_domain->win_cnt) {
500                 pr_debug("Invalid window index\n");
501                 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
502                 return -EINVAL;
503         }
504
505         win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
506         if (size > win_size) {
507                 pr_debug("Invalid window size\n");
508                 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
509                 return -EINVAL;
510         }
511
512         if (dma_domain->win_cnt == 1) {
513                 if (dma_domain->enabled) {
514                         pr_debug("Disable the window before updating the mapping\n");
515                         spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
516                         return -EBUSY;
517                 }
518
519                 ret = check_size(size, domain->geometry.aperture_start);
520                 if (ret) {
521                         pr_debug("Aperture start not aligned to the size\n");
522                         spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
523                         return -EINVAL;
524                 }
525         }
526
527         wnd = &dma_domain->win_arr[wnd_nr];
528         if (!wnd->valid) {
529                 wnd->paddr = paddr;
530                 wnd->size = size;
531                 wnd->prot = pamu_prot;
532
533                 ret = update_domain_mapping(dma_domain, wnd_nr);
534                 if (!ret) {
535                         wnd->valid = 1;
536                         dma_domain->mapped++;
537                 }
538         } else {
539                 pr_debug("Disable the window before updating the mapping\n");
540                 ret = -EBUSY;
541         }
542
543         spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
544
545         return ret;
546 }
547
/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = &dma_domain->iommu_domain;
	int ret = 0;
	int i;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			/* win_cnt of 0 tells PAMU "no subwindows". */
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry, win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
599
/*
 * iommu_ops->attach_dev callback: look up the device's
 * "fsl,liodn" property (redirected to the PCI controller node for PCI
 * devices) and attach every listed LIODN to the domain.
 */
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (liodn) {
		/* The property is an array of u32 LIODN values. */
		liodn_cnt = len / sizeof(u32);
		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
	} else {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		ret = -EINVAL;
	}

	return ret;
}
636
/*
 * iommu_ops->detach_dev callback: mirror of fsl_pamu_attach_device —
 * resolve the device (PCI controller for PCI devices), confirm it has a
 * "fsl,liodn" property and remove it from the domain.
 */
static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}
667
/*
 * DOMAIN_ATTR_GEOMETRY handler: validate and record the caller-supplied
 * aperture.  Rejects unaligned/undersized apertures and any change
 * while the domain is enabled.
 */
static  int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
	struct iommu_domain_geometry *geom_attr = data;
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	dma_addr_t geom_size;
	unsigned long flags;

	geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
	/*
	 * Sanity check the geometry size. Also, we do not support
	 * DMA outside of the geometry.
	 */
	if (check_size(geom_size, geom_attr->aperture_start) ||
	    !geom_attr->force_aperture) {
		pr_debug("Invalid PAMU geometry attributes\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return  -EBUSY;
	}

	/* Copy the domain geometry information */
	memcpy(&domain->geometry, geom_attr,
	       sizeof(struct iommu_domain_geometry));
	dma_domain->geom_size = geom_size;

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}
702
/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
	struct pamu_stash_attribute *stash_attr = data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	/* Keep a copy so DOMAIN_ATTR_FSL_PAMU_STASH reads can return it. */
	memcpy(&dma_domain->dma_stash, stash_attr,
	       sizeof(struct pamu_stash_attribute));

	/* Resolve (cache, cpu) to a hardware stash target id. */
	dma_domain->stash_id = get_stash_id(stash_attr->cache,
					    stash_attr->cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/* Propagate the new stash id to all attached LIODNs. */
	ret = update_domain_stash(dma_domain, dma_domain->stash_id);

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
729
/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	/* DMA can only be enabled once at least one window is mapped. */
	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		/*
		 * Per-LIODN failures are only logged, not propagated:
		 * the function still returns 0 below.
		 */
		if (ret)
			pr_debug("Unable to set dma state for liodn %d",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}
757
/*
 * DOMAIN_ATTR_WINDOWS handler: set the number of DMA windows for the
 * domain.  Requires the domain to be disabled, the geometry to be set
 * and @w_count to be a power of two within the hardware limit.
 * Reprograms the geometry on all attached LIODNs and (re)allocates the
 * window bookkeeping array.
 */
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Ensure domain is inactive i.e. DMA should be disabled for the domain */
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return  -EBUSY;
	}

	/* Ensure that the geometry has been set for the domain */
	if (!dma_domain->geom_size) {
		pr_debug("Please configure geometry before setting the number of windows\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/*
	 * Ensure we have valid window count i.e. it should be less than
	 * maximum permissible limit and should be a power of two.
	 */
	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
		pr_debug("Invalid window count\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/* A win_cnt of 0 tells the hardware "no subwindows". */
	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
				       w_count > 1 ? w_count : 0);
	if (!ret) {
		/* GFP_ATOMIC because the domain lock is held here. */
		kfree(dma_domain->win_arr);
		dma_domain->win_arr = kcalloc(w_count,
					      sizeof(*dma_domain->win_arr),
					      GFP_ATOMIC);
		if (!dma_domain->win_arr) {
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -ENOMEM;
		}
		dma_domain->win_cnt = w_count;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
806
807 static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
808                                     enum iommu_attr attr_type, void *data)
809 {
810         struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
811         int ret = 0;
812
813         switch (attr_type) {
814         case DOMAIN_ATTR_GEOMETRY:
815                 ret = configure_domain_geometry(domain, data);
816                 break;
817         case DOMAIN_ATTR_FSL_PAMU_STASH:
818                 ret = configure_domain_stash(dma_domain, data);
819                 break;
820         case DOMAIN_ATTR_FSL_PAMU_ENABLE:
821                 ret = configure_domain_dma_state(dma_domain, *(int *)data);
822                 break;
823         case DOMAIN_ATTR_WINDOWS:
824                 ret = fsl_pamu_set_windows(domain, *(u32 *)data);
825                 break;
826         default:
827                 pr_debug("Unsupported attribute type\n");
828                 ret = -EINVAL;
829                 break;
830         }
831
832         return ret;
833 }
834
835 static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
836                                     enum iommu_attr attr_type, void *data)
837 {
838         struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
839         int ret = 0;
840
841         switch (attr_type) {
842         case DOMAIN_ATTR_FSL_PAMU_STASH:
843                 memcpy(data, &dma_domain->dma_stash,
844                        sizeof(struct pamu_stash_attribute));
845                 break;
846         case DOMAIN_ATTR_FSL_PAMU_ENABLE:
847                 *(int *)data = dma_domain->enabled;
848                 break;
849         case DOMAIN_ATTR_FSL_PAMUV1:
850                 *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
851                 break;
852         case DOMAIN_ATTR_WINDOWS:
853                 *(u32 *)data = dma_domain->win_cnt;
854                 break;
855         default:
856                 pr_debug("Unsupported attribute type\n");
857                 ret = -EINVAL;
858                 break;
859         }
860
861         return ret;
862 }
863
/*
 * Return the iommu group of @dev, allocating a fresh group if the
 * device does not have one yet.
 */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *grp = iommu_group_get(dev);

	return grp ? grp : iommu_group_alloc();
}
874
/*
 * Return true if the PCI controller supports partitioning of individual
 * endpoints (i.e. each endpoint may get its own iommu group).
 */
static  bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}
885
886 /* Get iommu group information from peer devices or devices on the parent bus */
887 static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
888 {
889         struct pci_dev *tmp;
890         struct iommu_group *group;
891         struct pci_bus *bus = pdev->bus;
892
893         /*
894          * Traverese the pci bus device list to get
895          * the shared iommu group.
896          */
897         while (bus) {
898                 list_for_each_entry(tmp, &bus->devices, bus_list) {
899                         if (tmp == pdev)
900                                 continue;
901                         group = iommu_group_get(&tmp->dev);
902                         if (group)
903                                 return group;
904                 }
905
906                 bus = bus->parent;
907         }
908
909         return NULL;
910 }
911
/*
 * Determine the iommu group for a PCI device.
 *
 * If the controller supports endpoint partitioning, each device gets a
 * group via the generic pci_device_group() helper and the controller's
 * own group (if any) is dropped. Otherwise all devices behind the
 * controller share one group, taken either from the controller device
 * node (first probe) or from an already-probed sibling device.
 *
 * Returns the group or ERR_PTR(-ENODEV) if none could be determined.
 */
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * PCIe controller is not a partitionable entity
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controllers device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}
954
955 static struct iommu_group *fsl_pamu_device_group(struct device *dev)
956 {
957         struct iommu_group *group = ERR_PTR(-ENODEV);
958         int len;
959
960         /*
961          * For platform devices we allocate a separate group for
962          * each of the devices.
963          */
964         if (dev_is_pci(dev))
965                 group = get_pci_device_group(to_pci_dev(dev));
966         else if (of_get_property(dev->of_node, "fsl,liodn", &len))
967                 group = get_device_iommu_group(dev);
968
969         return group;
970 }
971
/*
 * iommu_ops->probe_device callback: there is a single PAMU instance,
 * so every device maps to the same iommu_device handle.
 */
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}
976
/* iommu_ops->release_device callback: no per-device state to tear down. */
static void fsl_pamu_release_device(struct device *dev)
{
}
980
/* IOMMU core callbacks implemented by the Freescale PAMU driver. */
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.domain_free    = fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.domain_get_attr = fsl_pamu_get_domain_attr,
	.probe_device	= fsl_pamu_probe_device,
	.release_device = fsl_pamu_release_device,
	.device_group   = fsl_pamu_device_group,
};
995
996 int __init pamu_domain_init(void)
997 {
998         int ret = 0;
999
1000         ret = iommu_init_mempool();
1001         if (ret)
1002                 return ret;
1003
1004         ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
1005         if (ret)
1006                 return ret;
1007
1008         iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
1009
1010         ret = iommu_device_register(&pamu_iommu);
1011         if (ret) {
1012                 iommu_device_sysfs_remove(&pamu_iommu);
1013                 pr_err("Can't register iommu device\n");
1014                 return ret;
1015         }
1016
1017         bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
1018         bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
1019
1020         return ret;
1021 }