/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

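/*
 * The low two bits of each entry encode its type, which is what the
 * predicates above test:
 * lv1: 0 or 3 = fault, 1 = link to an lv2 table, 2 = 1MiB section.
 * lv2: 0 = fault, 1 = 64KiB large page, 2 = 4KiB small page.
 * A freshly zeroed lv2 table therefore contains only fault entries.
 */
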
#ifdef CONFIG_BIG_ENDIAN
#warning "revisit driver if we can enable big-endian ptes"
#endif

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
 * v5.0 introduced support for 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support the address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
 * value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

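/*
 * IOMMU_READ is BIT(0) and IOMMU_WRITE is BIT(1), so a prot value masked
 * with SYSMMU_SUPPORTED_PROT_BITS is a direct index into the protection
 * tables above: IOMMU_READ | IOMMU_WRITE selects entry 3.
 */
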
#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

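/*
 * For example, iova 0x12345678 decomposes into lv1 index 0x123
 * (0x12345678 >> 20), lv2 index 0x45 ((0x12345678 >> 12) & 0xFF) and
 * in-page offset 0x678.
 */
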
#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

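/*
 * The PG_ENT_SHIFT right shift is what lets a 36-bit physical address fit
 * into a 32-bit entry on v5 hardware: with PG_ENT_SHIFT == 4, a section at
 * physical address 0x8_8000_0000 is stored as 0x8800_0000 plus the
 * protection and type bits.
 */
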
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))

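/* e.g. MAKE_MMU_VER(3, 3) == 0x183; versions compare as plain integers */
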
/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

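/*
 * ZERO_LV2LINK is an lv1 entry that links to zero_lv2_table, a shared lv2
 * table containing only fault entries. Unused lv1 slots are filled with it
 * instead of plain lv1 fault entries so that v3.3 hardware never caches an
 * lv1 fault entry in its FLPD cache (see the workaround notes below).
 */
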
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add. It contains a list of SYSMMU controllers defined by the
 * device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is an exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices that
 * have been attached to this domain, as well as the page tables of the IO
 * address space it defines. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients;	/* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;		/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

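/*
 * Writing CTRL_BLOCK stalls translation so the TLB can be safely updated;
 * bit 0 of REG_MMU_STATUS reads back 1 once the hardware has actually
 * entered the blocked state, which is polled below with a bounded retry
 * count.
 */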
static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		if (MMU_MAJ_VER(data->version) < 5)
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_MMU_FLUSH_ENTRY);
		else
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
		       data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}

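/*
 * Note that v5 hardware is programmed with a page frame number rather than
 * a full address, which is how a page table located above 4GiB can be set
 * through a 32-bit register.
 */
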
static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
		finfo->name, fault_addr, &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr = -1;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);
	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
			data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * SYSMMU driver keeps master's clock enabled only for the short
	 * time, while accessing the registers. For performing address
	 * translation during DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		__sysmmu_tlb_invalidate_entry(data, iova, 1);
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

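/*
 * For example, on v2 hardware unmapping a 64KiB large page yields
 * num_inv = SZ_64K / PAGE_SIZE = 16 per-entry invalidations, matching the
 * set-associativity note above; other versions invalidate a single entry.
 */
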
static struct iommu_ops exynos_iommu_ops;

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	pm_runtime_enable(dev);

	return 0;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe = exynos_sysmmu_probe,
	.driver = {
		.name = "exynos-sysmmu",
		.of_match_table = sysmmu_of_match,
		.pm = &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

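/*
 * Page tables are mapped for the IOMMU with dma_map_single(...,
 * DMA_TO_DEVICE), so every CPU-side PTE update is bracketed by
 * dma_sync_single_for_cpu()/_for_device() to keep the tables coherent
 * for the table-walking hardware on non-coherent systems.
 */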
static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * the following:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */

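/*
 * A minimal sketch of what this means for an IOVA allocator targeting
 * System MMU v3.3 (illustrative only, not part of this driver): round both
 * the start of every region and the gap after the previous one up to
 * 128KiB, e.g.
 *
 *	next_start = ALIGN(prev_end + SZ_128K, SZ_128K);
 *
 * so that no two regions are adjacent and each region starts on a 128KiB
 * boundary.
 */
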
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	if (!has_sysmmu(dev))
		return;

	iommu_group_remove_device(dev);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev->archdata.iommu = owner;
	}

	list_add_tail(&data->owner_node, &owner->controllers);

	/*
	 * SYSMMU will be runtime activated via device link (dependency) to its
	 * master device, so there are no direct calls to pm_runtime_get/put
	 * in this driver.
	 */
	device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);

	return 0;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = get_device_iommu_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

static bool init_done;

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	init_done = true;

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}

static int __init exynos_iommu_of_setup(struct device_node *np)
{
	struct platform_device *pdev;

	if (!init_done)
		exynos_iommu_init();

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (!pdev)
		return -ENODEV;

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
		 exynos_iommu_of_setup);