// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
	unsigned int swgroup;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

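/*
 * Register offsets below are relative to the SMMU register block, which on
 * these SoCs lives inside the memory controller's address space (note that
 * tegra_smmu_probe() reuses mc->regs).
 */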
#define SMMU_CONFIG 0x010
#define  SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define  SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define  SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define  SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define  SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define  SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define  SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
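/*
 * The SMMU uses a two-level page table: one 4 KiB page directory with 1024
 * entries, each covering a 4 MiB "section" (SMMU_PDE_SHIFT = 22), pointing
 * at 4 KiB page tables of 1024 entries mapping 4 KiB pages each
 * (SMMU_PTE_SHIFT = 12), which spans the full 32-bit IOVA space.
 */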
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)	((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	/* the PFN mask applies to the page frame number, not the address */
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

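/*
 * The SMMU caches page directory/table entries in a page table cache (PTC)
 * and translations in a TLB, so both must be invalidated after the CPU
 * updates an entry in memory. The helpers below poke the corresponding
 * flush registers.
 */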
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
	/* reading back any SMMU register ensures prior writes have posted */
	smmu_readl(smmu, SMMU_PTB_ASID);
}

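/*
 * ASIDs are tracked in a bitmap protected by smmu->lock; each address space
 * holds one ASID from tegra_smmu_as_prepare() until the last detach.
 */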
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

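/*
 * Look up the per-swgroup descriptor, including the offset of its ASID
 * register, in the SoC data.
 */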
static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

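/*
 * Masters reference the SMMU through "iommus" phandles in the device tree,
 * with the swgroup ID as the specifier cell; attach walks these entries and
 * enables translation for each swgroup that belongs to this SMMU.
 */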
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			/* skip entries that reference other IOMMUs */
			index++;
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			/* skip entries that reference other IOMMUs */
			index++;
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

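/*
 * Page-table updates follow a fixed sequence: write the entry, sync the CPU
 * cache line to memory for the device, invalidate the PTC and TLB so stale
 * copies are dropped, and finally read back a register to ensure the writes
 * have posted.
 */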
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct page *page)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
				    unsigned long iova, gfp_t gfp,
				    unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/* at first check whether allocation needs to be done at all */
	if (page)
		return page;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate the page in a sleeping context if the GFP flags permit.
	 * Hence the spinlock needs to be unlocked and re-locked after
	 * allocation.
	 */
	if (!(gfp & __GFP_ATOMIC))
		spin_unlock_irqrestore(&as->lock, *flags);

	page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

	if (!(gfp & __GFP_ATOMIC))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In the case of a blocking allocation, a concurrent mapping may win
	 * the PDE allocation. In this case the allocated page isn't needed
	 * if allocation succeeded and the allocation failure isn't fatal.
	 */
	if (as->pts[pde]) {
		if (page)
			__free_page(page);

		page = as->pts[pde];
	}

	return page;
}

static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct page *page;
	u32 pte_attrs;
	u32 *pte;

	page = as_get_pde_page(as, iova, gfp, flags);
	if (!page)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, page);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

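/*
 * The iommu_ops entry points serialize on the per-AS spinlock; the unlocked
 * __tegra_smmu_map()/__tegra_smmu_unmap() helpers above expect it to be
 * held.
 */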
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}

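/*
 * Resolve an "iommus" phandle to the SMMU instance via the memory
 * controller's driver data (mc->smmu is set in tegra_smmu_probe()).
 */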
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}

static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);
			of_node_put(args.np);

			if (err < 0)
				return ERR_PTR(err);

			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev_iommu_priv_set(dev, smmu);

			break;
		}

		of_node_put(args.np);
		index++;
	}

	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static void tegra_smmu_release_device(struct device *dev)
{
	dev_iommu_priv_set(dev, NULL);
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
	struct tegra_smmu_group *group = iommu_data;
	struct tegra_smmu *smmu = group->smmu;

	mutex_lock(&smmu->lock);
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
}

static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
						unsigned int swgroup)
{
	const struct tegra_smmu_group_soc *soc;
	struct tegra_smmu_group *group;
	struct iommu_group *grp;

	/* Find group_soc associating with swgroup */
	soc = tegra_smmu_find_group(smmu, swgroup);

	mutex_lock(&smmu->lock);

	/* Find existing iommu_group associating with swgroup or group_soc */
	list_for_each_entry(group, &smmu->groups, list)
		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
			return grp;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->swgroup = swgroup;
	group->smmu = smmu;
	group->soc = soc;

	group->group = iommu_group_alloc();
	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	if (soc)
		iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct iommu_group *group;

	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
	if (!group)
		group = generic_device_group(dev);

	return group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	u32 id = args->args[0];

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.probe_device = tegra_smmu_probe_device,
	.release_device = tegra_smmu_release_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
};

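/*
 * On Tegra30 the AHB bus can route memory transactions through the SMMU;
 * this has to be enabled explicitly via the AHB driver.
 */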
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .probe_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask =
		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}