// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <drm/panfrost_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"
#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 100000);
	if (ret) {
		/* The GPU hung, let's trigger a reset */
		panfrost_device_schedule_reset(pfdev);
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
	}

	return ret;
}
static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;
	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	size = round_up(size, PAGE_SIZE);

	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
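
/*
 * Issue an MMU command on a given address space. The "_locked" variant
 * expects pfdev->as_lock to already be held by the caller.
 */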
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, size_t size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, size_t size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
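
/*
 * Pick (or keep) a hardware address space for @mmu. If the context already
 * owns an AS, its use count is bumped and it is moved to the front of the
 * LRU list; otherwise a free AS is allocated, or the least-recently-used
 * idle AS is reclaimed, and the MMU is (re)enabled on it.
 */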
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);
		u32 mask = BIT(as) | BIT(16 + as);

		/*
		 * AS can be retained by active jobs or a perfcnt context,
		 * hence the '+ 1' here.
		 */
		WARN_ON(en >= (NUM_JOB_SLOTS + 1));

		list_move(&mmu->list, &pfdev->as_lru_list);

		if (pfdev->as_faulty_mask & mask) {
			/* Unhandled pagefault on this AS, the MMU was
			 * disabled. We need to re-enable the MMU after
			 * clearing+unmasking the AS interrupts.
			 */
			mmu_write(pfdev, MMU_INT_CLEAR, mask);
			mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
			pfdev->as_faulty_mask &= ~mask;
			panfrost_mmu_enable(pfdev, mmu);
		}

		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}
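
/* Forget all AS assignments after a GPU reset and re-enable MMU interrupts. */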
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;
	pfdev->as_faulty_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}
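
/* Use a 2MB block mapping when both the address and the size allow it, 4K otherwise. */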
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, size_t size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_sync_autosuspend(pfdev->dev);
}
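
/* Map each DMA segment of an sg_table into the AS, then flush the affected PT range. */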
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}
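
/* Map a GEM object's backing pages at the GPU VA reserved for this mapping. */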
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(mapping->active))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
		   prot, sgt);
	mapping->active = true;

	return 0;
}
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!mapping->active))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
		mapping->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		if (ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
			WARN_ON(unmapped_page != pgsize);
		}
		iova += pgsize;
		unmapped_len += pgsize;
	}

	panfrost_mmu_flush_range(pfdev, mapping->mmu,
				 mapping->mmnode.start << PAGE_SHIFT, len);
	mapping->active = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_mmu *mmu = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk	= mmu_tlb_flush_walk,
};
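
/*
 * Look up the GEM mapping that covers @addr on address space @as and take a
 * reference on it; returns NULL if no mapping matches.
 */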
static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_mapping *mapping = NULL;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:

	spin_lock(&mmu->mm_lock);

	drm_mm_for_each_node(node, &mmu->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			mapping = drm_mm_node_to_panfrost_mapping(node);

			kref_get(&mapping->refcount);
			break;
		}
	}

	spin_unlock(&mmu->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return mapping;
}
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
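
/*
 * Handle a translation fault on a heap BO by allocating and mapping the
 * faulting 2MB chunk on demand.
 */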
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{
	int ret, i;
	struct panfrost_gem_mapping *bomapping;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bomapping = addr_to_mapping(pfdev, as, addr);
	if (!bomapping)
		return -ENOENT;

	bo = bomapping->obj;
	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bomapping->mmnode.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bomapping->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bomapping->mmnode.start;

	mutex_lock(&bo->base.pages_lock);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kvfree(bo->sgts);
			bo->sgts = NULL;
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else {
		pages = bo->base.pages;
		if (pages[page_offset]) {
			/* Pages are already mapped, bail out. */
			mutex_unlock(&bo->base.pages_lock);
			goto out;
		}
	}

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			mutex_unlock(&bo->base.pages_lock);
			ret = PTR_ERR(pages[i]);
			goto err_pages;
		}
	}

	mutex_unlock(&bo->base.pages_lock);

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_pages;

	ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_map;

	mmu_map_sg(pfdev, bomapping->mmu, addr,
		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bomapping->active = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

out:
	panfrost_gem_mapping_put(bomapping);

	return 0;

err_map:
	sg_free_table(sgt);
err_pages:
	drm_gem_shmem_put_pages(&bo->base);
err_bo:
	drm_gem_object_put(&bo->base.base);
	return ret;
}
static void panfrost_mmu_release_ctx(struct kref *kref)
{
	struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
						refcount);
	struct panfrost_device *pfdev = mmu->pfdev;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
	drm_mm_takedown(&mmu->mm);
	kfree(mmu);
}
void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
	kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}

struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
	kref_get(&mmu->refcount);

	return mmu;
}
#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}
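
/* Allocate a per-file MMU context: GPU VA allocator, page-table ops and refcount. */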
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->pfdev = pfdev;
	spin_lock_init(&mmu->mm_lock);

	/* 4G enough for now. can be 48-bit */
	drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.coherent_walk	= pfdev->coherent,
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      mmu);
	if (!mmu->pgtbl_ops) {
		kfree(mmu);
		return ERR_PTR(-EINVAL);
	}

	kref_init(&mmu->refcount);

	return mmu;
}
static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}
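
/*
 * Threaded IRQ handler: walk the pending fault bits, try to grow heap BOs on
 * translation faults, and report and disable the AS for unhandled faults.
 */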
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int ret;

	while (status) {
		u32 as = ffs(status | (status >> 16)) - 1;
		u32 mask = BIT(as) | BIT(as + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		/* Page fault only */
		ret = -1;
		if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
			ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);

		if (ret) {
			/* terminal fault, print info about the fault */
			dev_err(pfdev->dev,
				"Unhandled Page fault in AS%d at VA 0x%016llX\n"
				"raw fault status: 0x%X\n"
				"decoded fault status: %s\n"
				"exception type 0x%X: %s\n"
				"access type 0x%X: %s\n"
				"source id 0x%X\n",
				as, addr,
				fault_status,
				(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
				exception_type, panfrost_exception_name(exception_type),
				access_type, access_type_name(pfdev, fault_status),
				source_id);

			spin_lock(&pfdev->as_lock);
			/* Ignore MMU interrupts on this AS until it's been
			 * re-enabled.
			 */
			pfdev->as_faulty_mask |= mask;

			/* Disable the MMU to kill jobs on this AS. */
			panfrost_mmu_disable(pfdev, as);
			spin_unlock(&pfdev->as_lock);
		}

		status &= ~mask;

		/* If we received new MMU interrupts, process them before returning. */
		if (!status)
			status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
	}

	spin_lock(&pfdev->as_lock);
	mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
	spin_unlock(&pfdev->as_lock);

	return IRQ_HANDLED;
}
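
/* Request the MMU IRQ with the quick handler and the threaded fault handler above. */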
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_threaded_irq(pfdev->dev, irq,
					panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-mmu",
					pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}
void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}