dma-buf: wait for map to complete for static attachments
author Christian König <christian.koenig@amd.com>
Tue, 23 Nov 2021 08:58:36 +0000 (09:58 +0100)
committer Christian König <christian.koenig@amd.com>
Thu, 7 Apr 2022 10:53:54 +0000 (12:53 +0200)
We have previously done this in the individual drivers, but it is more
defensive to move it into the common code.

Dynamic attachments are expected to wait for map operations to complete on their own.
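
For illustration, a dynamic importer would do roughly the following on its
own (a minimal sketch, not part of this patch; my_dynamic_map() and the
chosen DMA direction are hypothetical, only the dma-buf/dma-resv calls are
real):

    #include <linux/dma-buf.h>
    #include <linux/dma-resv.h>

    static struct sg_table *my_dynamic_map(struct dma_buf_attachment *attach)
    {
            struct sg_table *sgt;
            long ret;

            /* Dynamic attachments map and unmap with the reservation lock
             * held, so take it for the whole sequence. */
            dma_resv_lock(attach->dmabuf->resv, NULL);

            sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
            if (IS_ERR(sgt)) {
                    dma_resv_unlock(attach->dmabuf->resv);
                    return sgt;
            }

            /* The wait the common code now does for static attachments:
             * block on the exporter's kernel fences (e.g. a pending move)
             * before the mapping is actually used for DMA. */
            ret = dma_resv_wait_timeout(attach->dmabuf->resv,
                                        DMA_RESV_USAGE_KERNEL, true,
                                        MAX_SCHEDULE_TIMEOUT);
            if (ret < 0) {
                    dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
                    dma_resv_unlock(attach->dmabuf->resv);
                    return ERR_PTR(ret);
            }

            dma_resv_unlock(attach->dmabuf->resv);
            return sgt;
    }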

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-12-christian.koenig@amd.com
drivers/dma-buf/dma-buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/radeon/radeon_prime.c

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 1cddb65..7979585 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -661,12 +661,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
                                       enum dma_data_direction direction)
 {
        struct sg_table *sg_table;
+       signed long ret;
 
        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+       if (IS_ERR_OR_NULL(sg_table))
+               return sg_table;
+
+       if (!dma_buf_attachment_is_dynamic(attach)) {
+               ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+                                           DMA_RESV_USAGE_KERNEL, true,
+                                           MAX_SCHEDULE_TIMEOUT);
+               if (ret < 0) {
+                       attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+                                                          direction);
+                       return ERR_PTR(ret);
+               }
+       }
 
-       if (!IS_ERR_OR_NULL(sg_table))
-               mangle_sg_table(sg_table);
-
+       mangle_sg_table(sg_table);
        return sg_table;
 }
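
With this in place a static importer gets the wait for free; a hypothetical
call site (only dma_buf_map_attachment() itself is real):

    /* For a non-dynamic attachment __map_dma_buf() has already waited
     * for the exporter's DMA_RESV_USAGE_KERNEL fences, so the mapping
     * is ready for DMA as soon as it is returned. */
    sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
    if (IS_ERR(sgt))
            return PTR_ERR(sgt);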
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 579adfa..782cbca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
 {
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-       int r;
 
        /* pin buffer into GTT */
-       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
-       if (r)
-               return r;
-
-       if (bo->tbo.moving) {
-               r = dma_fence_wait(bo->tbo.moving, true);
-               if (r) {
-                       amdgpu_bo_unpin(bo);
-                       return r;
-               }
-       }
-       return 0;
+       return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 }
 
 /**
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 60019d0..3474886 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -93,22 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
        if (ret)
                return -EINVAL;
 
-       ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
-       if (ret)
-               goto error;
-
-       if (nvbo->bo.moving)
-               ret = dma_fence_wait(nvbo->bo.moving, true);
-
-       ttm_bo_unreserve(&nvbo->bo);
-       if (ret)
-               goto error;
-
-       return ret;
-
-error:
-       nouveau_bo_unpin(nvbo);
-       return ret;
+       return 0;
 }
 
 void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 4a90807..42a8794 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -77,19 +77,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
 
        /* pin buffer into GTT */
        ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
-       if (unlikely(ret))
-               goto error;
-
-       if (bo->tbo.moving) {
-               ret = dma_fence_wait(bo->tbo.moving, false);
-               if (unlikely(ret)) {
-                       radeon_bo_unpin(bo);
-                       goto error;
-               }
-       }
-
-       bo->prime_shared_count++;
-error:
+       if (likely(ret == 0))
+               bo->prime_shared_count++;
+
        radeon_bo_unreserve(bo);
        return ret;
 }