// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"
#define SMMU_INTR_SEL_NS     0x2000

struct qcom_iommu_ctx;
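
/*
 * One qcom_iommu_dev is allocated per "qcom,msm-iommu-v1" node, and one
 * qcom_iommu_ctx per child context-bank node.  Context banks are addressed
 * by asid, which maps 1:1 to the context bank index (see get_asid()).
 */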
struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device	 iommu;
	struct device		*dev;
	struct clk		*iface_clk;
	struct clk		*bus_clk;
	void __iomem		*local_base;
	u32			 sec_id;
	u8			 num_ctxs;
	struct qcom_iommu_ctx	*ctxs[0];   /* indexed by asid-1 */
};
struct qcom_iommu_ctx {
	struct device		*dev;
	void __iomem		*base;
	bool			 secure_init;
	u8			 asid;      /* asid and ctx bank # are 1:1 */
	struct iommu_domain	*domain;
};
struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	spinlock_t		 pgtbl_lock;
	struct mutex		 init_mutex; /* Protects iommu pointer */
	struct iommu_domain	 domain;
	struct qcom_iommu_dev	*iommu;
};
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;
static struct qcom_iommu_dev *to_iommu(struct iommu_fwspec *fwspec)
{
	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
		return NULL;

	return fwspec->iommu_priv;
}
static struct qcom_iommu_ctx *to_ctx(struct iommu_fwspec *fwspec, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);

	if (!qcom_iommu)
		return NULL;

	return qcom_iommu->ctxs[asid - 1];
}
static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}
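
/*
 * TLB maintenance: each invalidation helper below walks every context bank
 * attached to the domain (one per fwspec id); qcom_iommu_tlb_sync() then
 * issues TLBSYNC and polls TLBSTATUS until the hardware reports completion.
 */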
static void qcom_iommu_tlb_sync(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		unsigned int val, ret;

		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}
static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		size_t s = size;

		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}
static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
	qcom_iommu_tlb_sync(cookie);
}
static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t granule,
				    void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}
static const struct iommu_flush_ops qcom_flush_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
	.tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
	.tlb_add_page	= qcom_iommu_tlb_add_page,
};
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, "
				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

	return IRQ_HANDLED;
}
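
/*
 * Finalize a domain on first attach: allocate the ARM 32-bit LPAE stage-1
 * pagetable, have the secure world initialize each context bank if needed
 * (qcom_scm_restore_sec_cfg), program TTBR/TCR/MAIR, and finally enable
 * translation through SCTLR.
 */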
static int qcom_iommu_init_domain(struct iommu_domain *domain,
				  struct qcom_iommu_dev *qcom_iommu,
				  struct iommu_fwspec *fwspec)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	int i, ret = 0;
	u32 reg;

	mutex_lock(&qcom_domain->init_mutex);
	if (qcom_domain->iommu)
		goto out_unlock;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
		.ias		= 32,
		.oas		= 40,
		.tlb		= &qcom_flush_ops,
		.iommu_dev	= qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec);
	if (!pgtbl_ops) {
		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
		ret = -ENOMEM;
		goto out_clear_iommu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
	domain->geometry.force_aperture = true;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);

		if (!ctx->secure_init) {
			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
			if (ret) {
				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
				goto out_clear_iommu;
			}
			ctx->secure_init = true;
		}

		/* TTBRs */
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
			     FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

		/* TCR */
		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
			     arm_smmu_lpae_tcr2(&pgtbl_cfg));
		iommu_writel(ctx, ARM_SMMU_CB_TCR,
			     arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

		/* MAIRs (stage-1 only) */
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair);
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

		/* SCTLR */
		reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
		      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
		      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
		      ARM_SMMU_SCTLR_CFCFG;

		if (IS_ENABLED(CONFIG_BIG_ENDIAN))
			reg |= ARM_SMMU_SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

		ctx->domain = domain;
	}

	mutex_unlock(&qcom_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	qcom_domain->pgtbl_ops = pgtbl_ops;

	return 0;

out_clear_iommu:
	qcom_domain->iommu = NULL;
out_unlock:
	mutex_unlock(&qcom_domain->init_mutex);
	return ret;
}
static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
	struct qcom_iommu_domain *qcom_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
	if (!qcom_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&qcom_domain->domain)) {
		kfree(qcom_domain);
		return NULL;
	}

	mutex_init(&qcom_domain->init_mutex);
	spin_lock_init(&qcom_domain->pgtbl_lock);

	return &qcom_domain->domain;
}
static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	iommu_put_dma_cookie(domain);

	if (qcom_domain->iommu) {
		/*
		 * NOTE: unmap can be called after client device is powered
		 * off, for example, with GPUs or anything involving dma-buf.
		 * So we cannot rely on the device_link.  Make sure the IOMMU
		 * is on to avoid unclocked accesses in the TLB inv path:
		 */
		pm_runtime_get_sync(qcom_domain->iommu->dev);
		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
		pm_runtime_put_sync(qcom_domain->iommu->dev);
	}

	kfree(qcom_domain);
}
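
/*
 * Attaching runs under pm_runtime, since qcom_iommu_init_domain() writes
 * context bank registers and therefore needs the interface/bus clocks on.
 */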
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU %s while already "
			"attached to domain on IOMMU %s\n",
			dev_name(qcom_domain->iommu->dev),
			dev_name(qcom_iommu->dev));
		return -EINVAL;
	}

	return 0;
}
static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	unsigned i;

	if (WARN_ON(!qcom_domain->iommu))
		return;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}
static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf.  So we
	 * cannot rely on the device_link.  Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size, gather);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}
static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
						  struct io_pgtable, ops);
	if (!qcom_domain->pgtbl_ops)
		return;

	pm_runtime_get_sync(qcom_domain->iommu->dev);
	qcom_iommu_tlb_sync(pgtable->cookie);
	pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	qcom_iommu_flush_iotlb_all(domain);
}
static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

	return ret;
}
static bool qcom_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}
static int qcom_iommu_add_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
	struct iommu_group *group;
	struct device_link *link;

	if (!qcom_iommu)
		return -ENODEV;

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return -ENODEV;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	iommu_device_link(&qcom_iommu->iommu, dev);

	return 0;
}
static void qcom_iommu_remove_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));

	if (!qcom_iommu)
		return;

	iommu_device_unlink(&qcom_iommu->iommu, dev);
	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}
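
/*
 * Example of the consumer side that of_xlate() parses: a master's "iommus"
 * property references the iommu node plus one cell holding the context
 * bank's asid (the name and number below are illustrative, not taken from
 * a real dts):
 *
 *	iommus = <&apps_iommu 4>;
 *
 * args->args[0] is then the asid, which is range-checked against num_ctxs.
 */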
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu;
	struct platform_device *iommu_pdev;
	unsigned asid = args->args[0];

	if (args->args_count != 1) {
		dev_err(dev, "incorrect number of iommu params found for %s "
			"(found %d, expected 1)\n",
			args->np->full_name, args->args_count);
		return -EINVAL;
	}

	iommu_pdev = of_find_device_by_node(args->np);
	if (WARN_ON(!iommu_pdev))
		return -EINVAL;

	qcom_iommu = platform_get_drvdata(iommu_pdev);

	/* make sure the asid specified in dt is valid, so we don't have
	 * to sanity check this elsewhere, since 'asid - 1' is used to
	 * index into qcom_iommu->ctxs:
	 */
	if (WARN_ON(asid < 1) ||
	    WARN_ON(asid > qcom_iommu->num_ctxs))
		return -EINVAL;

	if (!fwspec->iommu_priv) {
		fwspec->iommu_priv = qcom_iommu;
	} else {
		/* make sure the device's iommus dt node isn't referring to
		 * multiple different iommu devices.  Multiple context
		 * banks are ok, but multiple devices are not:
		 */
		if (WARN_ON(qcom_iommu != fwspec->iommu_priv))
			return -EINVAL;
	}

	return iommu_fwspec_add_ids(dev, &asid, 1);
}
static const struct iommu_ops qcom_iommu_ops = {
	.capable	= qcom_iommu_capable,
	.domain_alloc	= qcom_iommu_domain_alloc,
	.domain_free	= qcom_iommu_domain_free,
	.attach_dev	= qcom_iommu_attach_dev,
	.detach_dev	= qcom_iommu_detach_dev,
	.map		= qcom_iommu_map,
	.unmap		= qcom_iommu_unmap,
	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
	.iotlb_sync	= qcom_iommu_iotlb_sync,
	.iova_to_phys	= qcom_iommu_iova_to_phys,
	.add_device	= qcom_iommu_add_device,
	.remove_device	= qcom_iommu_remove_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	int ret;

	ret = clk_prepare_enable(qcom_iommu->iface_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
		return ret;
	}

	ret = clk_prepare_enable(qcom_iommu->bus_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
		clk_disable_unprepare(qcom_iommu->iface_clk);
		return ret;
	}

	return 0;
}

static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	clk_disable_unprepare(qcom_iommu->bus_clk);
	clk_disable_unprepare(qcom_iommu->iface_clk);
}
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}
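
/*
 * Context banks sit 0x1000 apart in the parent node's address space, so the
 * asid falls out of the child's "reg" offset: e.g. reg = <0x4000 0x1000>
 * corresponds to asid 4 (0x4000 / 0x1000).
 */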
static int get_asid(const struct device_node *np)
{
	u32 reg;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;      /* context banks are 0x1000 apart */
}
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	struct resource *res;
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	/* clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}
	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	qcom_iommu->ctxs[ctx->asid - 1] = ctx;

	return 0;
}
static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid - 1] = NULL;

	return 0;
}
static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= of_match_ptr(ctx_of_match),
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove	= qcom_iommu_ctx_remove,
};
static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
	struct device_node *child;

	for_each_child_of_node(qcom_iommu->dev->of_node, child)
		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
			return true;

	return false;
}
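
/*
 * Sketch of the device-tree shape this driver binds against; all values
 * below are illustrative, not from a real board file:
 *
 *	apps_iommu: iommu@1e20000 {
 *		compatible = "qcom,msm-iommu-v1";
 *		reg = <0x1e20000 0x40000>;
 *		ranges = <0 0x1e20000 0x40000>;
 *		clocks = <&gcc GCC_SMMU_CFG_CLK>, <&gcc GCC_APSS_TCU_CLK>;
 *		clock-names = "iface", "bus";
 *		qcom,iommu-secure-id = <17>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		iommu-ctx@4000 {
 *			compatible = "qcom,msm-iommu-v1-ns";
 *			reg = <0x4000 0x1000>;
 *			interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
 *		};
 *	};
 */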
static int qcom_iommu_device_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct qcom_iommu_dev *qcom_iommu;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, max_asid = 0;

	/* find the max asid (which is 1:1 to ctx bank idx), so we know how
	 * many child ctx devices we have:
	 */
	for_each_child_of_node(dev->of_node, child)
		max_asid = max(max_asid, get_asid(child));

	qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
				  GFP_KERNEL);
	if (!qcom_iommu)
		return -ENOMEM;
	qcom_iommu->num_ctxs = max_asid;
	qcom_iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(qcom_iommu->local_base))
			return PTR_ERR(qcom_iommu->local_base);
	}

	qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(qcom_iommu->iface_clk)) {
		dev_err(dev, "failed to get iface clock\n");
		return PTR_ERR(qcom_iommu->iface_clk);
	}

	qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
	if (IS_ERR(qcom_iommu->bus_clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(qcom_iommu->bus_clk);
	}

	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
				 &qcom_iommu->sec_id)) {
		dev_err(dev, "missing qcom,iommu-secure-id property\n");
		return -ENODEV;
	}

	if (qcom_iommu_has_secure_context(qcom_iommu)) {
		ret = qcom_iommu_sec_ptbl_init(dev);
		if (ret) {
			dev_err(dev, "cannot init secure pg table(%d)\n", ret);
			return ret;
		}
	}

	platform_set_drvdata(pdev, qcom_iommu);

	pm_runtime_enable(dev);

	/* register context bank devices, which are child nodes: */
	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "Failed to populate iommu contexts\n");
		return ret;
	}

	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
				     dev_name(dev));
	if (ret) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
	iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);

	ret = iommu_device_register(&qcom_iommu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);

	if (qcom_iommu->local_base) {
		pm_runtime_get_sync(dev);
		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
		pm_runtime_put_sync(dev);
	}

	return 0;
}
static int qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	bus_set_iommu(&platform_bus_type, NULL);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);

	return 0;
}
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return qcom_iommu_enable_clocks(qcom_iommu);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	qcom_iommu_disable_clocks(qcom_iommu);

	return 0;
}
static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ /* sentinel */ }
};
static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= of_match_ptr(qcom_iommu_of_match),
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove	= qcom_iommu_device_remove,
};
static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}
device_initcall(qcom_iommu_init);