// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
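
/*
 * Note: mmu_write()/mmu_read() are thin wrappers around MMIO accesses to the
 * GPU register file. The AS_* registers used below are per address space and
 * are indexed by the AS number passed in as as_nr.
 */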
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	u32 val;
	int ret;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);
	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;

	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */
	size = round_up(size, PAGE_SIZE);

	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
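
/*
 * Worked example for lock_region() above (a sketch of what the code computes,
 * not a statement about the hardware encoding): locking a single 4 KiB page
 * gives fls(1) = 1, so region_width = 11 and the page count is already a
 * power of two; the value written to AS_LOCKADDR is then the page-aligned
 * iova with 11 in its low bits. A 3-page region is not a power of two, so
 * region_width is bumped by one and the lock covers the next power of two
 * (4 pages).
 */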
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, size_t size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, size_t size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);

	return ret;
}
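
/*
 * mmu_hw_do_operation() is the unlocked entry point: it takes pfdev->as_lock
 * itself. panfrost_mmu_enable()/panfrost_mmu_disable() below call the _locked
 * variant because, as used in this file, their callers already hold the lock.
 */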
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);
		WARN_ON(en >= NUM_JOB_SLOTS);

		list_move(&mmu->list, &pfdev->as_lru_list);
		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		/* No free AS: reclaim the least recently used idle one */
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}
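
/*
 * panfrost_mmu_as_get()/panfrost_mmu_as_put() effectively reference count a
 * hardware address space while work that uses it is in flight: as_get() bumps
 * as_count (assigning or reclaiming an AS as needed) and as_put() drops it.
 * An AS is only stolen from the LRU list once its as_count has reached zero.
 */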
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}
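
/*
 * get_pgsize() picks between the two page sizes supported here: a 2 MiB
 * mapping when the address is 2 MiB aligned and at least 2 MiB remain to be
 * mapped, and a 4 KiB mapping otherwise.
 */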
void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
			      struct panfrost_mmu *mmu,
			      u64 iova, size_t size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_sync_autosuspend(pfdev->dev);
}
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, prot);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}
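
/*
 * mmu_map_sg() above walks the DMA-mapped scatterlist and installs each
 * contiguous chunk with the largest page size get_pgsize() allows, then
 * issues a single flush over the whole range instead of one per entry.
 */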
int panfrost_mmu_map(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(bo->is_mapped))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
	bo->is_mapped = true;

	return 0;
}
void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	size_t len = bo->node.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!bo->is_mapped))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		if (ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
			WARN_ON(unmapped_page != pgsize);
		}
		iova += pgsize;
		unmapped_len += pgsize;
	}

	panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
	bo->is_mapped = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_device *pfdev = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
	.tlb_flush_leaf = mmu_tlb_flush_leaf,
};
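
/*
 * These io-pgtable callbacks are intentionally (near) no-ops: in this driver
 * the actual TLB maintenance is done through the AS_COMMAND_FLUSH_* commands
 * issued from panfrost_mmu_flush_range(), not from the page table code.
 */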
int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
{
	struct panfrost_mmu *mmu = &priv->mmu;
	struct panfrost_device *pfdev = priv->pfdev;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      priv);
	if (!mmu->pgtbl_ops)
		return -EINVAL;

	return 0;
}
void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
{
	struct panfrost_device *pfdev = priv->pfdev;
	struct panfrost_mmu *mmu = &priv->mmu;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
}
static struct panfrost_gem_object *
addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_object *bo = NULL;
	struct panfrost_file_priv *priv;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:
	priv = container_of(mmu, struct panfrost_file_priv, mmu);

	spin_lock(&priv->mm_lock);

	drm_mm_for_each_node(node, &priv->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			bo = drm_mm_node_to_panfrost_bo(node);
			drm_gem_object_get(&bo->base.base);
			break;
		}
	}

	spin_unlock(&priv->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return bo;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
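
/*
 * Heap BOs are grown on demand from the MMU fault handler below in 2 MiB
 * chunks; NUM_FAULT_PAGES is the number of pages backing one chunk (512 with
 * the usual 4 KiB PAGE_SIZE).
 */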
int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
{
	int ret, i;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bo = addr_to_drm_mm_node(pfdev, as, addr);
	if (!bo)
		return -ENOENT;

	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bo->node.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bo->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bo->node.start;

	mutex_lock(&bo->base.pages_lock);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kfree(bo->sgts);
			bo->sgts = NULL;
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else {
		pages = bo->base.pages;
	}

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			mutex_unlock(&bo->base.pages_lock);
			ret = PTR_ERR(pages[i]);
			goto err_pages;
		}
	}

	mutex_unlock(&bo->base.pages_lock);

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_pages;

	if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
		ret = -EINVAL;
		goto err_map;
	}

	mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bo->is_mapped = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

	drm_gem_object_put_unlocked(&bo->base.base);

	return 0;

err_map:
	sg_free_table(sgt);
err_pages:
	drm_gem_shmem_put_pages(&bo->base);
err_bo:
	drm_gem_object_put_unlocked(&bo->base.base);
	return ret;
}
static const char *access_type_name(struct panfrost_device *pfdev,
		u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}
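
/*
 * The hard IRQ handler above only masks further MMU interrupts and kicks the
 * threaded handler; the thread does the real work because growing a heap BO
 * (shmem page allocation, dma_map_sg) can sleep.
 */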
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int i, ret;

	for (i = 0; status; i++) {
		u32 mask = BIT(i) | BIT(i + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		if (!(status & mask))
			continue;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		/* Page fault only */
		if ((status & mask) == BIT(i)) {
			WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);

			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
			if (!ret) {
				mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
				status &= ~mask;
				continue;
			}
		}

		/* terminal fault, print info about the fault */
		dev_err(pfdev->dev,
			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
			"Reason: %s\n"
			"raw fault status: 0x%X\n"
			"decoded fault status: %s\n"
			"exception type 0x%X: %s\n"
			"access type 0x%X: %s\n"
			"source id 0x%X\n",
			i, addr,
			"TODO",
			fault_status,
			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
			exception_type, panfrost_exception_name(pfdev, exception_type),
			access_type, access_type_name(pfdev, fault_status),
			source_id);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		status &= ~mask;
	}

	mmu_write(pfdev, MMU_INT_MASK, ~0);
	return IRQ_HANDLED;
}
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, "mmu", pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}
void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}