1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * IOMMU API for Renesas VMSA-compatible IPMMU
4  * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
5  *
6  * Copyright (C) 2014-2020 Renesas Electronics Corporation
7  */
8
9 #include <linux/bitmap.h>
10 #include <linux/delay.h>
11 #include <linux/dma-iommu.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/io-pgtable.h>
19 #include <linux/iommu.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/of_platform.h>
23 #include <linux/platform_device.h>
24 #include <linux/sizes.h>
25 #include <linux/slab.h>
26 #include <linux/sys_soc.h>
27
28 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
29 #include <asm/dma-iommu.h>
30 #else
31 #define arm_iommu_create_mapping(...)   NULL
32 #define arm_iommu_attach_device(...)    -ENODEV
33 #define arm_iommu_release_mapping(...)  do {} while (0)
34 #define arm_iommu_detach_device(...)    do {} while (0)
35 #endif
36
37 #define IPMMU_CTX_MAX           8U
38 #define IPMMU_CTX_INVALID       -1
39
40 #define IPMMU_UTLB_MAX          48U
41
42 struct ipmmu_features {
43         bool use_ns_alias_offset;
44         bool has_cache_leaf_nodes;
45         unsigned int number_of_contexts;
46         unsigned int num_utlbs;
47         bool setup_imbuscr;
48         bool twobit_imttbcr_sl0;
49         bool reserved_context;
50         bool cache_snoop;
51         unsigned int ctx_offset_base;
52         unsigned int ctx_offset_stride;
53         unsigned int utlb_offset_base;
54 };
55
56 struct ipmmu_vmsa_device {
57         struct device *dev;
58         void __iomem *base;
59         struct iommu_device iommu;
60         struct ipmmu_vmsa_device *root;
61         const struct ipmmu_features *features;
62         unsigned int num_ctx;
63         spinlock_t lock;                        /* Protects ctx and domains[] */
64         DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
65         struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
66         s8 utlb_ctx[IPMMU_UTLB_MAX];
67
68         struct iommu_group *group;
69         struct dma_iommu_mapping *mapping;
70 };
71
72 struct ipmmu_vmsa_domain {
73         struct ipmmu_vmsa_device *mmu;
74         struct iommu_domain io_domain;
75
76         struct io_pgtable_cfg cfg;
77         struct io_pgtable_ops *iop;
78
79         unsigned int context_id;
80         struct mutex mutex;                     /* Protects mappings */
81 };
82
83 static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
84 {
85         return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
86 }
87
88 static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
89 {
90         return dev_iommu_priv_get(dev);
91 }
92
93 #define TLB_LOOP_TIMEOUT                100     /* 100us */
94
95 /* -----------------------------------------------------------------------------
96  * Registers Definition
97  */
98
99 #define IM_NS_ALIAS_OFFSET              0x800
100
101 /* MMU "context" registers */
102 #define IMCTR                           0x0000          /* R-Car Gen2/3 */
103 #define IMCTR_INTEN                     (1 << 2)        /* R-Car Gen2/3 */
104 #define IMCTR_FLUSH                     (1 << 1)        /* R-Car Gen2/3 */
105 #define IMCTR_MMUEN                     (1 << 0)        /* R-Car Gen2/3 */
106
107 #define IMTTBCR                         0x0008          /* R-Car Gen2/3 */
108 #define IMTTBCR_EAE                     (1 << 31)       /* R-Car Gen2/3 */
109 #define IMTTBCR_SH0_INNER_SHAREABLE     (3 << 12)       /* R-Car Gen2 only */
110 #define IMTTBCR_ORGN0_WB_WA             (1 << 10)       /* R-Car Gen2 only */
111 #define IMTTBCR_IRGN0_WB_WA             (1 << 8)        /* R-Car Gen2 only */
112 #define IMTTBCR_SL0_TWOBIT_LVL_1        (2 << 6)        /* R-Car Gen3 only */
113 #define IMTTBCR_SL0_LVL_1               (1 << 4)        /* R-Car Gen2 only */
114
115 #define IMBUSCR                         0x000c          /* R-Car Gen2 only */
116 #define IMBUSCR_DVM                     (1 << 2)        /* R-Car Gen2 only */
117 #define IMBUSCR_BUSSEL_MASK             (3 << 0)        /* R-Car Gen2 only */
118
119 #define IMTTLBR0                        0x0010          /* R-Car Gen2/3 */
120 #define IMTTUBR0                        0x0014          /* R-Car Gen2/3 */
121
122 #define IMSTR                           0x0020          /* R-Car Gen2/3 */
123 #define IMSTR_MHIT                      (1 << 4)        /* R-Car Gen2/3 */
124 #define IMSTR_ABORT                     (1 << 2)        /* R-Car Gen2/3 */
125 #define IMSTR_PF                        (1 << 1)        /* R-Car Gen2/3 */
126 #define IMSTR_TF                        (1 << 0)        /* R-Car Gen2/3 */
127
128 #define IMMAIR0                         0x0028          /* R-Car Gen2/3 */
129
130 #define IMELAR                          0x0030          /* R-Car Gen2/3, IMEAR on R-Car Gen2 */
131 #define IMEUAR                          0x0034          /* R-Car Gen3 only */
132
133 /* uTLB registers */
134 #define IMUCTR(n)                       ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
135 #define IMUCTR0(n)                      (0x0300 + ((n) * 16))           /* R-Car Gen2/3 */
136 #define IMUCTR32(n)                     (0x0600 + (((n) - 32) * 16))    /* R-Car Gen3 only */
137 #define IMUCTR_TTSEL_MMU(n)             ((n) << 4)      /* R-Car Gen2/3 */
138 #define IMUCTR_FLUSH                    (1 << 1)        /* R-Car Gen2/3 */
139 #define IMUCTR_MMUEN                    (1 << 0)        /* R-Car Gen2/3 */
140
141 #define IMUASID(n)                      ((n) < 32 ? IMUASID0(n) : IMUASID32(n))
142 #define IMUASID0(n)                     (0x0308 + ((n) * 16))           /* R-Car Gen2/3 */
143 #define IMUASID32(n)                    (0x0608 + (((n) - 32) * 16))    /* R-Car Gen3 only */
144
145 /* -----------------------------------------------------------------------------
146  * Root device handling
147  */
148
149 static struct platform_driver ipmmu_driver;
150
151 static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
152 {
153         return mmu->root == mmu;
154 }
155
156 static int __ipmmu_check_device(struct device *dev, void *data)
157 {
158         struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
159         struct ipmmu_vmsa_device **rootp = data;
160
161         if (ipmmu_is_root(mmu))
162                 *rootp = mmu;
163
164         return 0;
165 }
166
167 static struct ipmmu_vmsa_device *ipmmu_find_root(void)
168 {
169         struct ipmmu_vmsa_device *root = NULL;
170
171         return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
172                                       __ipmmu_check_device) == 0 ? root : NULL;
173 }
174
175 /* -----------------------------------------------------------------------------
176  * Read/Write Access
177  */
178
179 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
180 {
181         return ioread32(mmu->base + offset);
182 }
183
184 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
185                         u32 data)
186 {
187         iowrite32(data, mmu->base + offset);
188 }
189
190 static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
191                                   unsigned int context_id, unsigned int reg)
192 {
193         return mmu->features->ctx_offset_base +
194                context_id * mmu->features->ctx_offset_stride + reg;
195 }
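/*
 * Illustrative example: both feature sets in this driver use
 * ctx_offset_base = 0 and ctx_offset_stride = 0x40, so IMTTBCR (0x0008) of
 * context 2 maps to register offset 0x0088.
 */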
196
197 static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
198                           unsigned int context_id, unsigned int reg)
199 {
200         return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
201 }
202
203 static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
204                             unsigned int context_id, unsigned int reg, u32 data)
205 {
206         ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
207 }
208
209 static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
210                                unsigned int reg)
211 {
212         return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
213 }
214
215 static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
216                                  unsigned int reg, u32 data)
217 {
218         ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
219 }
220
221 static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
222                                 unsigned int reg, u32 data)
223 {
224         if (domain->mmu != domain->mmu->root)
225                 ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
226
227         ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
228 }
229
230 static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
231 {
232         return mmu->features->utlb_offset_base + reg;
233 }
234
235 static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
236                                 unsigned int utlb, u32 data)
237 {
238         ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
239 }
240
241 static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
242                                unsigned int utlb, u32 data)
243 {
244         ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
245 }
246
247 /* -----------------------------------------------------------------------------
248  * TLB and microTLB Management
249  */
250
251 /* Wait for any pending TLB invalidations to complete */
252 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
253 {
254         unsigned int count = 0;
255
256         while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
257                 cpu_relax();
258                 if (++count == TLB_LOOP_TIMEOUT) {
259                         dev_err_ratelimited(domain->mmu->dev,
260                         "TLB sync timed out -- MMU may be deadlocked\n");
261                         return;
262                 }
263                 udelay(1);
264         }
265 }
266
267 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
268 {
269         u32 reg;
270
271         reg = ipmmu_ctx_read_root(domain, IMCTR);
272         reg |= IMCTR_FLUSH;
273         ipmmu_ctx_write_all(domain, IMCTR, reg);
274
275         ipmmu_tlb_sync(domain);
276 }
277
278 /*
279  * Enable MMU translation for the microTLB.
280  */
281 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
282                               unsigned int utlb)
283 {
284         struct ipmmu_vmsa_device *mmu = domain->mmu;
285
286         /*
287          * TODO: Reference-count the microTLB as several bus masters can be
288          * connected to the same microTLB.
289          */
290
291         /* TODO: What should we set the ASID to? */
292         ipmmu_imuasid_write(mmu, utlb, 0);
293         /* TODO: Do we need to flush the microTLB? */
294         ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
295                                       IMUCTR_FLUSH | IMUCTR_MMUEN);
296         mmu->utlb_ctx[utlb] = domain->context_id;
297 }
298
299 /*
300  * Disable MMU translation for the microTLB.
301  */
302 static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
303                                unsigned int utlb)
304 {
305         struct ipmmu_vmsa_device *mmu = domain->mmu;
306
307         ipmmu_imuctr_write(mmu, utlb, 0);
308         mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
309 }
310
311 static void ipmmu_tlb_flush_all(void *cookie)
312 {
313         struct ipmmu_vmsa_domain *domain = cookie;
314
315         ipmmu_tlb_invalidate(domain);
316 }
317
318 static void ipmmu_tlb_flush(unsigned long iova, size_t size,
319                                 size_t granule, void *cookie)
320 {
321         ipmmu_tlb_flush_all(cookie);
322 }
323
324 static const struct iommu_flush_ops ipmmu_flush_ops = {
325         .tlb_flush_all = ipmmu_tlb_flush_all,
326         .tlb_flush_walk = ipmmu_tlb_flush,
327 };
328
329 /* -----------------------------------------------------------------------------
330  * Domain/Context Management
331  */
332
333 static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
334                                          struct ipmmu_vmsa_domain *domain)
335 {
336         unsigned long flags;
337         int ret;
338
339         spin_lock_irqsave(&mmu->lock, flags);
340
341         ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
342         if (ret != mmu->num_ctx) {
343                 mmu->domains[ret] = domain;
344                 set_bit(ret, mmu->ctx);
345         } else
346                 ret = -EBUSY;
347
348         spin_unlock_irqrestore(&mmu->lock, flags);
349
350         return ret;
351 }
352
353 static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
354                                       unsigned int context_id)
355 {
356         unsigned long flags;
357
358         spin_lock_irqsave(&mmu->lock, flags);
359
360         clear_bit(context_id, mmu->ctx);
361         mmu->domains[context_id] = NULL;
362
363         spin_unlock_irqrestore(&mmu->lock, flags);
364 }
365
366 static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
367 {
368         u64 ttbr;
369         u32 tmp;
370
371         /* TTBR0 */
372         ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
373         ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
374         ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
375
376         /*
377          * TTBCR
378          * We use long descriptors and allocate the whole 32-bit VA space to
379          * TTBR0.
380          */
381         if (domain->mmu->features->twobit_imttbcr_sl0)
382                 tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
383         else
384                 tmp = IMTTBCR_SL0_LVL_1;
385
386         if (domain->mmu->features->cache_snoop)
387                 tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
388                        IMTTBCR_IRGN0_WB_WA;
389
390         ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
391
392         /* MAIR0 */
393         ipmmu_ctx_write_root(domain, IMMAIR0,
394                              domain->cfg.arm_lpae_s1_cfg.mair);
395
396         /* IMBUSCR */
397         if (domain->mmu->features->setup_imbuscr)
398                 ipmmu_ctx_write_root(domain, IMBUSCR,
399                                      ipmmu_ctx_read_root(domain, IMBUSCR) &
400                                      ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
401
402         /*
403          * IMSTR
404          * Clear all interrupt flags.
405          */
406         ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
407
408         /*
409          * IMCTR
410          * Enable the MMU and interrupt generation. The long-descriptor
411          * translation table format doesn't use TEX remapping. Don't enable AF
412          * software management as we have no use for it. Flush the TLB as
413          * required when modifying the context registers.
414          */
415         ipmmu_ctx_write_all(domain, IMCTR,
416                             IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
417 }
418
419 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
420 {
421         int ret;
422
423         /*
424          * Allocate the page table operations.
425          *
426          * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
427          * access, Long-descriptor format" that the NStable bit being set in a
428          * table descriptor will result in the NStable and NS bits of all child
429          * entries being ignored and considered as being set. The IPMMU seems
430          * not to comply with this, as it generates a secure access page fault
431          * if any of the NStable and NS bits isn't set when running in
432          * non-secure mode.
433          */
434         domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
435         domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
436         domain->cfg.ias = 32;
437         domain->cfg.oas = 40;
438         domain->cfg.tlb = &ipmmu_flush_ops;
439         domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
440         domain->io_domain.geometry.force_aperture = true;
441         /*
442          * TODO: Add support for coherent walk through CCI with DVM and remove
443          * cache handling. For now, delegate it to the io-pgtable code.
444          */
445         domain->cfg.coherent_walk = false;
446         domain->cfg.iommu_dev = domain->mmu->root->dev;
447
448         /*
449          * Find an unused context.
450          */
451         ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
452         if (ret < 0)
453                 return ret;
454
455         domain->context_id = ret;
456
457         domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
458                                            domain);
459         if (!domain->iop) {
460                 ipmmu_domain_free_context(domain->mmu->root,
461                                           domain->context_id);
462                 return -EINVAL;
463         }
464
465         ipmmu_domain_setup_context(domain);
466         return 0;
467 }
468
469 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
470 {
471         if (!domain->mmu)
472                 return;
473
474         /*
475          * Disable the context. Flush the TLB as required when modifying the
476          * context registers.
477          *
478          * TODO: Is a TLB flush really needed?
479          */
480         ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
481         ipmmu_tlb_sync(domain);
482         ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
483 }
484
485 /* -----------------------------------------------------------------------------
486  * Fault Handling
487  */
488
489 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
490 {
491         const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
492         struct ipmmu_vmsa_device *mmu = domain->mmu;
493         unsigned long iova;
494         u32 status;
495
496         status = ipmmu_ctx_read_root(domain, IMSTR);
497         if (!(status & err_mask))
498                 return IRQ_NONE;
499
500         iova = ipmmu_ctx_read_root(domain, IMELAR);
501         if (IS_ENABLED(CONFIG_64BIT))
502                 iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
503
504         /*
505          * Clear the error status flags. Unlike traditional interrupt flag
506          * registers that must be cleared by writing 1, this status register
507          * seems to require 0. The error address register must be read
508          * before clearing the status, otherwise its value will be 0.
509          */
510         ipmmu_ctx_write_root(domain, IMSTR, 0);
511
512         /* Log fatal errors. */
513         if (status & IMSTR_MHIT)
514                 dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
515                                     iova);
516         if (status & IMSTR_ABORT)
517                 dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
518                                     iova);
519
520         if (!(status & (IMSTR_PF | IMSTR_TF)))
521                 return IRQ_NONE;
522
523         /*
524          * Try to handle page faults and translation faults.
525          *
526          * TODO: We need to look up the faulty device based on the I/O VA. Use
527          * the IOMMU device for now.
528          */
529         if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
530                 return IRQ_HANDLED;
531
532         dev_err_ratelimited(mmu->dev,
533                             "Unhandled fault: status 0x%08x iova 0x%lx\n",
534                             status, iova);
535
536         return IRQ_HANDLED;
537 }
538
539 static irqreturn_t ipmmu_irq(int irq, void *dev)
540 {
541         struct ipmmu_vmsa_device *mmu = dev;
542         irqreturn_t status = IRQ_NONE;
543         unsigned int i;
544         unsigned long flags;
545
546         spin_lock_irqsave(&mmu->lock, flags);
547
548         /*
549          * Check interrupts for all active contexts.
550          */
551         for (i = 0; i < mmu->num_ctx; i++) {
552                 if (!mmu->domains[i])
553                         continue;
554                 if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
555                         status = IRQ_HANDLED;
556         }
557
558         spin_unlock_irqrestore(&mmu->lock, flags);
559
560         return status;
561 }
562
563 /* -----------------------------------------------------------------------------
564  * IOMMU Operations
565  */
566
567 static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
568 {
569         struct ipmmu_vmsa_domain *domain;
570
571         domain = kzalloc(sizeof(*domain), GFP_KERNEL);
572         if (!domain)
573                 return NULL;
574
575         mutex_init(&domain->mutex);
576
577         return &domain->io_domain;
578 }
579
580 static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
581 {
582         struct iommu_domain *io_domain = NULL;
583
584         switch (type) {
585         case IOMMU_DOMAIN_UNMANAGED:
586                 io_domain = __ipmmu_domain_alloc(type);
587                 break;
588
589         case IOMMU_DOMAIN_DMA:
590                 io_domain = __ipmmu_domain_alloc(type);
591                 if (io_domain && iommu_get_dma_cookie(io_domain)) {
592                         kfree(io_domain);
593                         io_domain = NULL;
594                 }
595                 break;
596         }
597
598         return io_domain;
599 }
600
601 static void ipmmu_domain_free(struct iommu_domain *io_domain)
602 {
603         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
604
605         /*
606          * Free the domain resources. We assume that all devices have already
607          * been detached.
608          */
609         iommu_put_dma_cookie(io_domain);
610         ipmmu_domain_destroy_context(domain);
611         free_io_pgtable_ops(domain->iop);
612         kfree(domain);
613 }
614
615 static int ipmmu_attach_device(struct iommu_domain *io_domain,
616                                struct device *dev)
617 {
618         struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
619         struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
620         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
621         unsigned int i;
622         int ret = 0;
623
624         if (!mmu) {
625                 dev_err(dev, "Cannot attach to IPMMU\n");
626                 return -ENXIO;
627         }
628
629         mutex_lock(&domain->mutex);
630
631         if (!domain->mmu) {
632                 /* The domain hasn't been used yet, initialize it. */
633                 domain->mmu = mmu;
634                 ret = ipmmu_domain_init_context(domain);
635                 if (ret < 0) {
636                         dev_err(dev, "Unable to initialize IPMMU context\n");
637                         domain->mmu = NULL;
638                 } else {
639                         dev_info(dev, "Using IPMMU context %u\n",
640                                  domain->context_id);
641                 }
642         } else if (domain->mmu != mmu) {
643                 /*
644                  * Something is wrong, we can't attach two devices using
645                  * different IOMMUs to the same domain.
646                  */
647                 dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
648                         dev_name(mmu->dev), dev_name(domain->mmu->dev));
649                 ret = -EINVAL;
650         } else
651                 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
652
653         mutex_unlock(&domain->mutex);
654
655         if (ret < 0)
656                 return ret;
657
658         for (i = 0; i < fwspec->num_ids; ++i)
659                 ipmmu_utlb_enable(domain, fwspec->ids[i]);
660
661         return 0;
662 }
663
664 static void ipmmu_detach_device(struct iommu_domain *io_domain,
665                                 struct device *dev)
666 {
667         struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
668         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
669         unsigned int i;
670
671         for (i = 0; i < fwspec->num_ids; ++i)
672                 ipmmu_utlb_disable(domain, fwspec->ids[i]);
673
674         /*
675          * TODO: Optimize by disabling the context when no device is attached.
676          */
677 }
678
679 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
680                      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
681 {
682         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
683
684         if (!domain)
685                 return -ENODEV;
686
687         return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
688 }
689
690 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
691                           size_t size, struct iommu_iotlb_gather *gather)
692 {
693         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
694
695         return domain->iop->unmap(domain->iop, iova, size, gather);
696 }
697
698 static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
699 {
700         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
701
702         if (domain->mmu)
703                 ipmmu_tlb_flush_all(domain);
704 }
705
706 static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
707                              struct iommu_iotlb_gather *gather)
708 {
709         ipmmu_flush_iotlb_all(io_domain);
710 }
711
712 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
713                                       dma_addr_t iova)
714 {
715         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
716
717         /* TODO: Is locking needed? */
718
719         return domain->iop->iova_to_phys(domain->iop, iova);
720 }
721
722 static int ipmmu_init_platform_device(struct device *dev,
723                                       struct of_phandle_args *args)
724 {
725         struct platform_device *ipmmu_pdev;
726
727         ipmmu_pdev = of_find_device_by_node(args->np);
728         if (!ipmmu_pdev)
729                 return -ENODEV;
730
731         dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
732
733         return 0;
734 }
735
736 static const struct soc_device_attribute soc_needs_opt_in[] = {
737         { .family = "R-Car Gen3", },
738         { .family = "RZ/G2", },
739         { /* sentinel */ }
740 };
741
742 static const struct soc_device_attribute soc_denylist[] = {
743         { .soc_id = "r8a774a1", },
744         { .soc_id = "r8a7795", .revision = "ES1.*" },
745         { .soc_id = "r8a7795", .revision = "ES2.*" },
746         { .soc_id = "r8a7796", },
747         { /* sentinel */ }
748 };
749
750 static const char * const devices_allowlist[] = {
751         "ee100000.mmc",
752         "ee120000.mmc",
753         "ee140000.mmc",
754         "ee160000.mmc"
755 };
756
757 static bool ipmmu_device_is_allowed(struct device *dev)
758 {
759         unsigned int i;
760
761         /*
762          * R-Car Gen3 and RZ/G2 use the allow list to opt in devices.
763          * For other SoCs, this always returns true.
764          */
765         if (!soc_device_match(soc_needs_opt_in))
766                 return true;
767
768         /* Check whether this SoC can use the IPMMU correctly or not */
769         if (soc_device_match(soc_denylist))
770                 return false;
771
772         /* Check whether this device can work with the IPMMU */
773         for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
774                 if (!strcmp(dev_name(dev), devices_allowlist[i]))
775                         return true;
776         }
777
778         /* Otherwise, do not allow use of IPMMU */
779         return false;
780 }
781
782 static int ipmmu_of_xlate(struct device *dev,
783                           struct of_phandle_args *spec)
784 {
785         if (!ipmmu_device_is_allowed(dev))
786                 return -ENODEV;
787
788         iommu_fwspec_add_ids(dev, spec->args, 1);
789
790         /* Initialize once - xlate() may be called multiple times */
791         if (to_ipmmu(dev))
792                 return 0;
793
794         return ipmmu_init_platform_device(dev, spec);
795 }
796
797 static int ipmmu_init_arm_mapping(struct device *dev)
798 {
799         struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
800         int ret;
801
802         /*
803          * Create the ARM mapping, used by the ARM DMA mapping core to allocate
804          * VAs. This will allocate a corresponding IOMMU domain.
805          *
806          * TODO:
807          * - Create one mapping per context (TLB).
808          * - Make the mapping size configurable ? We currently use a 2GB mapping
809          *   at a 1GB offset to ensure that NULL VAs will fault.
810          */
811         if (!mmu->mapping) {
812                 struct dma_iommu_mapping *mapping;
813
814                 mapping = arm_iommu_create_mapping(&platform_bus_type,
815                                                    SZ_1G, SZ_2G);
816                 if (IS_ERR(mapping)) {
817                         dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
818                         ret = PTR_ERR(mapping);
819                         goto error;
820                 }
821
822                 mmu->mapping = mapping;
823         }
824
825         /* Attach the ARM VA mapping to the device. */
826         ret = arm_iommu_attach_device(dev, mmu->mapping);
827         if (ret < 0) {
828                 dev_err(dev, "Failed to attach device to VA mapping\n");
829                 goto error;
830         }
831
832         return 0;
833
834 error:
835         if (mmu->mapping)
836                 arm_iommu_release_mapping(mmu->mapping);
837
838         return ret;
839 }
840
841 static struct iommu_device *ipmmu_probe_device(struct device *dev)
842 {
843         struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
844
845         /*
846          * Only let through devices that have been verified in xlate()
847          */
848         if (!mmu)
849                 return ERR_PTR(-ENODEV);
850
851         return &mmu->iommu;
852 }
853
854 static void ipmmu_probe_finalize(struct device *dev)
855 {
856         int ret = 0;
857
858         if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
859                 ret = ipmmu_init_arm_mapping(dev);
860
861         if (ret)
862                 dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
863 }
864
865 static void ipmmu_release_device(struct device *dev)
866 {
867         arm_iommu_detach_device(dev);
868 }
869
870 static struct iommu_group *ipmmu_find_group(struct device *dev)
871 {
872         struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
873         struct iommu_group *group;
874
875         if (mmu->group)
876                 return iommu_group_ref_get(mmu->group);
877
878         group = iommu_group_alloc();
879         if (!IS_ERR(group))
880                 mmu->group = group;
881
882         return group;
883 }
884
885 static const struct iommu_ops ipmmu_ops = {
886         .domain_alloc = ipmmu_domain_alloc,
887         .domain_free = ipmmu_domain_free,
888         .attach_dev = ipmmu_attach_device,
889         .detach_dev = ipmmu_detach_device,
890         .map = ipmmu_map,
891         .unmap = ipmmu_unmap,
892         .flush_iotlb_all = ipmmu_flush_iotlb_all,
893         .iotlb_sync = ipmmu_iotlb_sync,
894         .iova_to_phys = ipmmu_iova_to_phys,
895         .probe_device = ipmmu_probe_device,
896         .release_device = ipmmu_release_device,
897         .probe_finalize = ipmmu_probe_finalize,
898         .device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
899                         ? generic_device_group : ipmmu_find_group,
900         .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
901         .of_xlate = ipmmu_of_xlate,
902 };
903
904 /* -----------------------------------------------------------------------------
905  * Probe/remove and init
906  */
907
908 static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
909 {
910         unsigned int i;
911
912         /* Disable all contexts. */
913         for (i = 0; i < mmu->num_ctx; ++i)
914                 ipmmu_ctx_write(mmu, i, IMCTR, 0);
915 }
916
917 static const struct ipmmu_features ipmmu_features_default = {
918         .use_ns_alias_offset = true,
919         .has_cache_leaf_nodes = false,
920         .number_of_contexts = 1, /* software only tested with one context */
921         .num_utlbs = 32,
922         .setup_imbuscr = true,
923         .twobit_imttbcr_sl0 = false,
924         .reserved_context = false,
925         .cache_snoop = true,
926         .ctx_offset_base = 0,
927         .ctx_offset_stride = 0x40,
928         .utlb_offset_base = 0,
929 };
930
931 static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
932         .use_ns_alias_offset = false,
933         .has_cache_leaf_nodes = true,
934         .number_of_contexts = 8,
935         .num_utlbs = 48,
936         .setup_imbuscr = false,
937         .twobit_imttbcr_sl0 = true,
938         .reserved_context = true,
939         .cache_snoop = false,
940         .ctx_offset_base = 0,
941         .ctx_offset_stride = 0x40,
942         .utlb_offset_base = 0,
943 };
944
945 static const struct of_device_id ipmmu_of_ids[] = {
946         {
947                 .compatible = "renesas,ipmmu-vmsa",
948                 .data = &ipmmu_features_default,
949         }, {
950                 .compatible = "renesas,ipmmu-r8a774a1",
951                 .data = &ipmmu_features_rcar_gen3,
952         }, {
953                 .compatible = "renesas,ipmmu-r8a774b1",
954                 .data = &ipmmu_features_rcar_gen3,
955         }, {
956                 .compatible = "renesas,ipmmu-r8a774c0",
957                 .data = &ipmmu_features_rcar_gen3,
958         }, {
959                 .compatible = "renesas,ipmmu-r8a774e1",
960                 .data = &ipmmu_features_rcar_gen3,
961         }, {
962                 .compatible = "renesas,ipmmu-r8a7795",
963                 .data = &ipmmu_features_rcar_gen3,
964         }, {
965                 .compatible = "renesas,ipmmu-r8a7796",
966                 .data = &ipmmu_features_rcar_gen3,
967         }, {
968                 .compatible = "renesas,ipmmu-r8a77961",
969                 .data = &ipmmu_features_rcar_gen3,
970         }, {
971                 .compatible = "renesas,ipmmu-r8a77965",
972                 .data = &ipmmu_features_rcar_gen3,
973         }, {
974                 .compatible = "renesas,ipmmu-r8a77970",
975                 .data = &ipmmu_features_rcar_gen3,
976         }, {
977                 .compatible = "renesas,ipmmu-r8a77990",
978                 .data = &ipmmu_features_rcar_gen3,
979         }, {
980                 .compatible = "renesas,ipmmu-r8a77995",
981                 .data = &ipmmu_features_rcar_gen3,
982         }, {
983                 /* Terminator */
984         },
985 };
986
987 static int ipmmu_probe(struct platform_device *pdev)
988 {
989         struct ipmmu_vmsa_device *mmu;
990         struct resource *res;
991         int irq;
992         int ret;
993
994         mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
995         if (!mmu) {
996                 dev_err(&pdev->dev, "cannot allocate device data\n");
997                 return -ENOMEM;
998         }
999
1000         mmu->dev = &pdev->dev;
1001         spin_lock_init(&mmu->lock);
1002         bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
1003         mmu->features = of_device_get_match_data(&pdev->dev);
1004         memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
1005         dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
1006
1007         /* Map I/O memory and request IRQ. */
1008         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1009         mmu->base = devm_ioremap_resource(&pdev->dev, res);
1010         if (IS_ERR(mmu->base))
1011                 return PTR_ERR(mmu->base);
1012
1013         /*
1014          * The IPMMU has two register banks, for secure and non-secure modes.
1015          * The bank mapped at the beginning of the IPMMU address space
1016          * corresponds to the running mode of the CPU. When running in secure
1017          * mode the non-secure register bank is also available at an offset.
1018          *
1019          * Secure mode operation isn't clearly documented and is thus currently
1020          * not implemented in the driver. Furthermore, preliminary tests of
1021          * non-secure operation with the main register bank were not successful.
1022          * Offset the registers base unconditionally to point to the non-secure
1023          * alias space for now.
1024          */
1025         if (mmu->features->use_ns_alias_offset)
1026                 mmu->base += IM_NS_ALIAS_OFFSET;
1027
1028         mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
1029
1030         /*
1031          * Determine if this IPMMU instance is a root device by checking for
1032          * the lack of the has_cache_leaf_nodes flag or renesas,ipmmu-main property.
1033          */
1034         if (!mmu->features->has_cache_leaf_nodes ||
1035             !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
1036                 mmu->root = mmu;
1037         else
1038                 mmu->root = ipmmu_find_root();
1039
1040         /*
1041          * Defer probing until the root device has been registered.
1042          */
1043         if (!mmu->root)
1044                 return -EPROBE_DEFER;
1045
1046         /* Root devices have mandatory IRQs */
1047         if (ipmmu_is_root(mmu)) {
1048                 irq = platform_get_irq(pdev, 0);
1049                 if (irq < 0)
1050                         return irq;
1051
1052                 ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
1053                                        dev_name(&pdev->dev), mmu);
1054                 if (ret < 0) {
1055                         dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
1056                         return ret;
1057                 }
1058
1059                 ipmmu_device_reset(mmu);
1060
1061                 if (mmu->features->reserved_context) {
1062                         dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
1063                         set_bit(0, mmu->ctx);
1064                 }
1065         }
1066
1067         /*
1068          * Register the IPMMU to the IOMMU subsystem in the following cases:
1069          * - R-Car Gen2 IPMMU (all devices registered)
1070          * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
1071          */
1072         if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
1073                 ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
1074                                              dev_name(&pdev->dev));
1075                 if (ret)
1076                         return ret;
1077
1078                 ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
1079                 if (ret)
1080                         return ret;
1081
1082 #if defined(CONFIG_IOMMU_DMA)
1083                 if (!iommu_present(&platform_bus_type))
1084                         bus_set_iommu(&platform_bus_type, &ipmmu_ops);
1085 #endif
1086         }
1087
1088         /*
1089          * We can't create the ARM mapping here as it requires the bus to have
1090          * an IOMMU, which only happens when bus_set_iommu() is called in
1091          * ipmmu_init() after the probe function returns.
1092          */
1093
1094         platform_set_drvdata(pdev, mmu);
1095
1096         return 0;
1097 }
1098
1099 static int ipmmu_remove(struct platform_device *pdev)
1100 {
1101         struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
1102
1103         iommu_device_sysfs_remove(&mmu->iommu);
1104         iommu_device_unregister(&mmu->iommu);
1105
1106         arm_iommu_release_mapping(mmu->mapping);
1107
1108         ipmmu_device_reset(mmu);
1109
1110         return 0;
1111 }
1112
1113 #ifdef CONFIG_PM_SLEEP
1114 static int ipmmu_resume_noirq(struct device *dev)
1115 {
1116         struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
1117         unsigned int i;
1118
1119         /* Reset root MMU and restore contexts */
1120         if (ipmmu_is_root(mmu)) {
1121                 ipmmu_device_reset(mmu);
1122
1123                 for (i = 0; i < mmu->num_ctx; i++) {
1124                         if (!mmu->domains[i])
1125                                 continue;
1126
1127                         ipmmu_domain_setup_context(mmu->domains[i]);
1128                 }
1129         }
1130
1131         /* Re-enable active micro-TLBs */
1132         for (i = 0; i < mmu->features->num_utlbs; i++) {
1133                 if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
1134                         continue;
1135
1136                 ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
1137         }
1138
1139         return 0;
1140 }
1141
1142 static const struct dev_pm_ops ipmmu_pm  = {
1143         SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
1144 };
1145 #define DEV_PM_OPS      &ipmmu_pm
1146 #else
1147 #define DEV_PM_OPS      NULL
1148 #endif /* CONFIG_PM_SLEEP */
1149
1150 static struct platform_driver ipmmu_driver = {
1151         .driver = {
1152                 .name = "ipmmu-vmsa",
1153                 .of_match_table = of_match_ptr(ipmmu_of_ids),
1154                 .pm = DEV_PM_OPS,
1155         },
1156         .probe = ipmmu_probe,
1157         .remove = ipmmu_remove,
1158 };
1159
1160 static int __init ipmmu_init(void)
1161 {
1162         struct device_node *np;
1163         static bool setup_done;
1164         int ret;
1165
1166         if (setup_done)
1167                 return 0;
1168
1169         np = of_find_matching_node(NULL, ipmmu_of_ids);
1170         if (!np)
1171                 return 0;
1172
1173         of_node_put(np);
1174
1175         ret = platform_driver_register(&ipmmu_driver);
1176         if (ret < 0)
1177                 return ret;
1178
1179 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
1180         if (!iommu_present(&platform_bus_type))
1181                 bus_set_iommu(&platform_bus_type, &ipmmu_ops);
1182 #endif
1183
1184         setup_done = true;
1185         return 0;
1186 }
1187 subsys_initcall(ipmmu_init);