accel/ivpu: Unmap partially mapped BOs in case of errors
Author:     Karol Wachowski <karol.wachowski@intel.com>
AuthorDate: Thu, 17 Oct 2024 14:58:11 +0000 (16:58 +0200)
Commit:     Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
CommitDate: Wed, 30 Oct 2024 09:22:07 +0000 (10:22 +0100)
Ensure that buffers whose allocated scatter-gather tables were only
partially mapped are fully unmapped from the MMU600 in case of errors.

Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241017145817.121590-6-jacek.lawrynowicz@linux.intel.com

diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 8992fe9..697b570 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -432,6 +432,7 @@ int
 ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                         u64 vpu_addr, struct sg_table *sgt,  bool llc_coherent)
 {
+       u64 start_vpu_addr = vpu_addr;
        struct scatterlist *sg;
        int ret;
        u64 prot;
@@ -462,7 +463,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
-                       goto err_unlock;
+                       goto err_unmap_pages;
                }
                vpu_addr += size;
        }
@@ -472,7 +473,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                if (ret) {
                        ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
                                 ctx->id, ret);
-                       goto err_unlock;
+                       goto err_unmap_pages;
                }
                ctx->is_cd_valid = true;
        }
@@ -480,17 +481,19 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();
 
-       mutex_unlock(&ctx->lock);
-
        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
-       if (ret)
+       if (ret) {
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
-       return ret;
+               goto err_unmap_pages;
+       }
 
-err_unlock:
        mutex_unlock(&ctx->lock);
-       return ret;
+       return 0;
 
+err_unmap_pages:
+       ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
+       mutex_unlock(&ctx->lock);
+       return ret;
 }
 
 void
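
For reference, the change applies the standard "record the start, unwind
the covered range" error-handling idiom. The sketch below is a minimal,
self-contained C illustration of that idiom, independent of the driver:
the toy_* names, types, and prototypes are hypothetical stand-ins
invented for this example, not the ivpu API.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical primitives standing in for the driver's map/unmap helpers. */
int  toy_map_range(uint64_t addr, size_t size);   /* 0 on success, -errno on failure */
void toy_unmap_range(uint64_t addr, size_t size); /* cannot fail */

struct toy_chunk {
	size_t size;
};

/*
 * Map "n" chunks back to back starting at vpu_addr. On any failure,
 * unmap exactly the range mapped so far, so the caller is never left
 * with a half-populated address range.
 */
int toy_map_chunks(uint64_t vpu_addr, const struct toy_chunk *chunks, size_t n)
{
	uint64_t start_vpu_addr = vpu_addr;
	size_t i;
	int ret;

	for (i = 0; i < n; i++) {
		ret = toy_map_range(vpu_addr, chunks[i].size);
		if (ret)
			goto err_unmap;
		/*
		 * Advance the cursor only after a successful map, so that
		 * [start_vpu_addr, vpu_addr) is always exactly the mapped range.
		 */
		vpu_addr += chunks[i].size;
	}

	return 0;

err_unmap:
	toy_unmap_range(start_vpu_addr, vpu_addr - start_vpu_addr);
	return ret;
}

The same bookkeeping covers failures after the mapping loop: once the
loop completes, vpu_addr points one byte past the last mapped address,
which is why the patch can reuse the err_unmap_pages label when setting
the context descriptor or invalidating the TLB fails even though every
ivpu_mmu_context_map_pages() call succeeded. This is also why the patch
keeps ctx->lock held across ivpu_mmu_invalidate_tlb(): the unwind path
must run under the same lock that protected the mapping.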