// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Rockchip
 *
 * Module Authors:	Simon Xue <xxm@rock-chips.com>
 *			Daniel Kurtz <djkurtz@chromium.org>
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED		BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE		BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE		BIT(2)
#define RK_MMU_STATUS_IDLE			BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY	BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE	BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE		BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING	0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING	1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL		2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL	3  /* Stop stall, re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE		4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE	5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET		6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT	0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR	0x02  /* bus read error */
#define RK_MMU_IRQ_MASK		(RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
/*
 * Support mapping any size that fits in one page table:
 *   4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
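/*
 * Illustrative note (added for clarity, not part of the original driver):
 * the bitmap above has bits 12 through 22 set, so the IOMMU core may split
 * any mapping into power-of-two chunks between 4 KiB (1 << 12) and
 * 4 MiB (1 << 22).
 */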
struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};
/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};
struct rk_iommu_ops {
	phys_addr_t (*pt_address)(u32 dte);
	u32 (*mk_dtentries)(dma_addr_t pt_dma);
	u32 (*mk_ptentries)(phys_addr_t page, int prot);
	phys_addr_t (*dte_addr_phys)(u32 addr);
	u32 (*dma_addr_dte)(dma_addr_t dt_dma);
	u64 dma_bit_mask;
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	int num_irq;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
	struct iommu_group *group;
};

struct rk_iommudata {
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
};

static struct device *dma_dev;
static const struct rk_iommu_ops *rk_ops;
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entry */

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}
/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table" (PT).
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+     PT
 *                 | DTE | -> +-----+     Memory Page
 *                 +-----+    | PTE | -> +-----+
 *                 |     |    +-----+    |     |
 *                 +-----+    |     |    +-----+
 *                            +-----+
 */
/*
 * Each DTE has a PT address and a "valid" bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK	0xfffff000
#define RK_DTE_PT_VALID		BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}
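/*
 * Illustrative example (added for clarity, not part of the original driver;
 * the value is arbitrary): a DTE of 0x8765f001 describes a valid page table
 * at physical address 0x8765f000 - bits 31:12 hold the PT address and
 * bit 0 is the valid bit.
 */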
/*
 * In v2:
 * 31:12 - PT address bit 31:0
 * 11: 8 - PT address bit 35:32
 *  7: 4 - PT address bit 39:36
 *  3: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
#define DTE_HI_MASK1	GENMASK(11, 8)
#define DTE_HI_MASK2	GENMASK(7, 4)
#define DTE_HI_SHIFT1	24 /* shift bit 8 to bit 32 */
#define DTE_HI_SHIFT2	32 /* shift bit 4 to bit 36 */
/*
 * MASK1 pairs with DTE_HI_SHIFT1 and MASK2 with DTE_HI_SHIFT2 in
 * rk_mk_dte_v2(), so address bits 35:32 land in dte bits 11:8 and
 * address bits 39:36 land in dte bits 7:4, matching the layout above.
 */
#define PAGE_DESC_HI_MASK1	GENMASK_ULL(35, 32)
#define PAGE_DESC_HI_MASK2	GENMASK_ULL(39, 36)
static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
{
	u64 dte_v2 = dte;

	dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
		 ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
		 (dte_v2 & RK_DTE_PT_ADDRESS_MASK);

	return (phys_addr_t)dte_v2;
}
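/*
 * Illustrative round trip (added for clarity, not part of the original
 * driver; the address is arbitrary): a page table at 0x23_4567_8000 is
 * encoded by rk_mk_dte_v2() as 0x45678321 - bits 31:12 of the address in
 * dte bits 31:12, bits 35:32 (0x3) in dte bits 11:8, bits 39:36 (0x2) in
 * dte bits 7:4, plus the valid bit - and rk_dte_pt_address_v2() recovers
 * 0x23_4567_8000 from that value.
 */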
static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
{
	pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
		 ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
		 (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2;

	return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
}
/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK	0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK		0x000001fe
#define RK_PTE_PAGE_WRITABLE		BIT(2)
#define RK_PTE_PAGE_READABLE		BIT(1)
#define RK_PTE_PAGE_VALID		BIT(0)

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}
/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}
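/*
 * Illustrative example (added for clarity, not part of the original driver;
 * the page address is arbitrary): rk_mk_pte(0x41230000, IOMMU_READ |
 * IOMMU_WRITE) yields 0x41230007 - the page address in bits 31:12 plus the
 * readable (bit 1), writable (bit 2) and valid (bit 0) flags.
 */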
/*
 * In v2:
 * 31:12 - Page address bit 31:0
 *  11:9 - Page address bit 34:32
 *   8:4 - Page address bit 39:35
 *     2 - Readable
 *     1 - Writable
 *     0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_READABLE_V2	BIT(2)
#define RK_PTE_PAGE_WRITABLE_V2	BIT(1)
static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
	return rk_mk_dte_v2(page) | flags;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}
/*
 * rk3288 iova (IOMMU Virtual Address) format
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK	0xffc00000
#define RK_IOVA_DTE_SHIFT	22
#define RK_IOVA_PTE_MASK	0x003ff000
#define RK_IOVA_PTE_SHIFT	12
#define RK_IOVA_PAGE_MASK	0x00000fff
#define RK_IOVA_PAGE_SHIFT	0
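/*
 * Illustrative decomposition (added for clarity, not part of the original
 * driver; the iova is arbitrary): iova 0x12345678 splits into DTE index
 * 0x048 (bits 31:22), PTE index 0x345 (bits 21:12) and page offset 0x678
 * (bits 11:0).
 */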
static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}
static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}
static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}
static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}
static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}
static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that the upper 5 nybbles are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);

		if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}
static inline phys_addr_t rk_dte_addr_phys(u32 addr)
{
	return (phys_addr_t)addr;
}

static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
{
	return dt_dma;
}

#define DT_HI_MASK GENMASK_ULL(39, 32)
#define DTE_BASE_HI_MASK GENMASK(11, 4)
#define DT_SHIFT 28

static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
{
	u64 addr64 = addr;

	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
}

static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
{
	return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
	       ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
}
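/*
 * Illustrative example (added for clarity, not part of the original driver;
 * the DT address is arbitrary): a directory table at 0x23_4567_8000 is
 * written to RK_MMU_DTE_ADDR as 0x45678230 by rk_dma_addr_dte_v2()
 * (address bits 39:32 folded into register bits 11:4), and
 * rk_dte_addr_phys_v2() recovers 0x23_4567_8000 from that register value.
 */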
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_ops->pt_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}
static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i, err;

	err = pm_runtime_get_if_in_use(iommu->dev);
	if (!err || WARN_ON_ONCE(err < 0))
		return ret;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report the page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap
			 * the cache and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev,
						   iova, flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
	pm_runtime_put(iommu->dev);
	return ret;
}
static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_ops->pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}
static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		int ret;

		iommu = list_entry(pos, struct rk_iommu, node);

		/* Only zap TLBs of IOMMUs that are powered on. */
		ret = pm_runtime_get_if_in_use(iommu->dev);
		if (WARN_ON_ONCE(ret < 0))
			continue;
		if (ret) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_ops->mk_dtentries(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_ops->pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}
static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}
static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_ops->pt_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];

	pte_dma = rk_ops->pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}
static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_ops->pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct rk_iommudata *data = dev_iommu_priv_get(dev);

	return data ? data->iommu : NULL;
}
/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
{
	int i;

	/* Ignore error while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}
/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_ops->dma_addr_dte(rk_domain->dt_dma));
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}
static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/* Allow 'virtual devices' (eg drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (WARN_ON(!iommu))
		return;

	dev_dbg(dev, "Detaching from iommu domain\n");

	/* iommu already detached */
	if (iommu->domain != domain)
		return;

	iommu->domain = NULL;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	WARN_ON_ONCE(ret < 0);
	if (ret > 0) {
		rk_iommu_disable(iommu);
		pm_runtime_put(iommu->dev);
	}
}
static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	dev_dbg(dev, "Attaching to iommu domain\n");

	/* iommu already attached */
	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	if (!ret || WARN_ON_ONCE(ret < 0))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}
static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		goto err_free_domain;

	/*
	 * rk32xx iommus use a 2 level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
err_free_domain:
	kfree(rk_domain);

	return NULL;
}
static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_ops->pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
	kfree(rk_domain);
}
static struct iommu_device *rk_iommu_probe_device(struct device *dev)
{
	struct rk_iommudata *data;
	struct rk_iommu *iommu;

	data = dev_iommu_priv_get(dev);
	if (!data)
		return ERR_PTR(-ENODEV);

	iommu = rk_iommu_from_dev(dev);

	data->link = device_link_add(dev, iommu->dev,
				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);

	return &iommu->iommu;
}

static void rk_iommu_release_device(struct device *dev)
{
	struct rk_iommudata *data = dev_iommu_priv_get(dev);

	device_link_del(data->link);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
	struct rk_iommu *iommu;

	iommu = rk_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}
static int rk_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	dev_iommu_priv_set(dev, data);

	platform_device_put(iommu_dev);

	return 0;
}
static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.probe_device = rk_iommu_probe_device,
	.release_device = rk_iommu_release_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
};
static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	const struct rk_iommu_ops *ops;
	int num_res = pdev->num_resources;
	int err, i;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	ops = of_device_get_match_data(dev);
	if (!rk_ops)
		rk_ops = ops;

	/*
	 * That should not happen unless different versions of the
	 * hardware block are embedded in the same SoC
	 */
	if (WARN_ON(rk_ops != ops))
		return -EINVAL;

	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	iommu->num_irq = platform_irq_count(pdev);
	if (iommu->num_irq < 0)
		return iommu->num_irq;

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

	/*
	 * iommu clocks should be present for all new devices and devicetrees
	 * but there are older devicetrees without clocks out in the wild.
	 * So treat clocks as optional for the time being.
	 */
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err == -ENOENT)
		iommu->num_clocks = 0;
	else if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
		goto err_unprepare_clocks;
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_put_group;

	err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
	if (err)
		goto err_remove_sysfs;

	/*
	 * Use the first registered IOMMU device for domain to use with DMA
	 * API, since a domain might not physically correspond to a single
	 * IOMMU device.
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

	pm_runtime_enable(dev);

	for (i = 0; i < iommu->num_irq; i++) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err) {
			pm_runtime_disable(dev);
			goto err_remove_sysfs;
		}
	}

	dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
	iommu_group_put(iommu->group);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}
static void rk_iommu_shutdown(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < iommu->num_irq; i++) {
		int irq = platform_get_irq(pdev, i);

		devm_free_irq(iommu->dev, irq, iommu);
	}

	pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}
static const struct dev_pm_ops rk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static struct rk_iommu_ops iommu_data_ops_v1 = {
	.pt_address = &rk_dte_pt_address,
	.mk_dtentries = &rk_mk_dte,
	.mk_ptentries = &rk_mk_pte,
	.dte_addr_phys = &rk_dte_addr_phys,
	.dma_addr_dte = &rk_dma_addr_dte,
	.dma_bit_mask = DMA_BIT_MASK(32),
};

static struct rk_iommu_ops iommu_data_ops_v2 = {
	.pt_address = &rk_dte_pt_address_v2,
	.mk_dtentries = &rk_mk_dte_v2,
	.mk_ptentries = &rk_mk_pte_v2,
	.dte_addr_phys = &rk_dte_addr_phys_v2,
	.dma_addr_dte = &rk_dma_addr_dte_v2,
	.dma_bit_mask = DMA_BIT_MASK(40),
};
static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu",
	  .data = &iommu_data_ops_v1,
	},
	{ .compatible = "rockchip,rk3568-iommu",
	  .data = &iommu_data_ops_v2,
	},
	{ /* sentinel */ }
};
static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .pm = &rk_iommu_pm_ops,
		   .suppress_bind_attrs = true,
	},
};
static int __init rk_iommu_init(void)
{
	return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);