@@ ... @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
+ signed long ret;

sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ if (IS_ERR_OR_NULL(sg_table))
+ return sg_table;
+
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0) {
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+ direction);
+ return ERR_PTR(ret);
+ }
+ }
- if (!IS_ERR_OR_NULL(sg_table))
- mangle_sg_table(sg_table);
-
+ mangle_sg_table(sg_table);
return sg_table;
}
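
With this hunk in drivers/dma-buf/dma-buf.c, __map_dma_buf() itself waits for all DMA_RESV_USAGE_KERNEL fences on the buffer before handing the mapping to a static (non-dynamic) importer, and unwinds the mapping if the wait is interrupted. What this buys importers is shown in the minimal sketch below; example_import(), dev and dmabuf are illustrative names, not part of the patch:

        #include <linux/dma-buf.h>

        static int example_import(struct device *dev, struct dma_buf *dmabuf)
        {
                struct dma_buf_attachment *attach;
                struct sg_table *sgt;

                attach = dma_buf_attach(dmabuf, dev);
                if (IS_ERR(attach))
                        return PTR_ERR(attach);

                /* For static attachments this now implicitly waits for all
                 * DMA_RESV_USAGE_KERNEL fences, e.g. a pending buffer move. */
                sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
                if (IS_ERR(sgt)) {
                        dma_buf_detach(dmabuf, attach);
                        return PTR_ERR(sgt);
                }

                /* ... program the device with sgt, then tear down ... */
                dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
                dma_buf_detach(dmabuf, attach);
                return 0;
        }

Dynamic attachments are deliberately skipped by the dma_buf_attachment_is_dynamic() check: dynamic importers are notified of moves and remain responsible for their own fencing. With the wait handled here, the per-driver copies of it below become dead weight.
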
@@ ... @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int r;
/* pin buffer into GTT */
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
- if (r)
- return r;
-
- if (bo->tbo.moving) {
- r = dma_fence_wait(bo->tbo.moving, true);
- if (r) {
- amdgpu_bo_unpin(bo);
- return r;
- }
- }
- return 0;
+ return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}
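
The amdgpu hunk above (amdgpu_dma_buf_pin() in amdgpu_dma_buf.c) can drop its open-coded wait on bo->tbo.moving because the fence of a pending TTM move is also published in the BO's reservation object as a kernel fence, which is exactly what __map_dma_buf() now waits on. A hedged sketch of that invariant; move_fence is an illustrative name and the dma_resv lock is assumed to be held:

        int ret;

        /* make room for one fence slot, then publish the move fence */
        ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
        if (!ret)
                dma_resv_add_fence(bo->tbo.base.resv, move_fence,
                                   DMA_RESV_USAGE_KERNEL);
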
@@ ... @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
if (ret)
return -EINVAL;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
- if (ret)
- goto error;
-
- if (nvbo->bo.moving)
- ret = dma_fence_wait(nvbo->bo.moving, true);
-
- ttm_bo_unreserve(&nvbo->bo);
- if (ret)
- goto error;
-
- return ret;
-
-error:
- nouveau_bo_unpin(nvbo);
- return ret;
+ return 0;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
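
nouveau (nouveau_gem_prime_pin() in nouveau_prime.c) loses the whole reserve/wait/unreserve dance on the error path. For reference, a hedged reconstruction of the function as it reads with the patch applied; the lines around the hunk are inferred from context and may not match the tree exactly:

        int nouveau_gem_prime_pin(struct drm_gem_object *obj)
        {
                struct nouveau_bo *nvbo = nouveau_gem_object(obj);
                int ret;

                /* pin buffer into GTT */
                ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
                if (ret)
                        return -EINVAL;

                return 0;
        }

The long-standing quirk of reporting a pin failure as -EINVAL is left untouched.
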
@@ ... @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (unlikely(ret))
- goto error;
-
- if (bo->tbo.moving) {
- ret = dma_fence_wait(bo->tbo.moving, false);
- if (unlikely(ret)) {
- radeon_bo_unpin(bo);
- goto error;
- }
- }
-
- bo->prime_shared_count++;
-error:
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
+
radeon_bo_unreserve(bo);
return ret;
}
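
radeon keeps its reserve/unreserve pair because prime_shared_count must still be updated under the BO reservation; only the fence wait disappears, and the increment folds into the success path via likely(ret == 0). For reference, a hedged reconstruction of the complete function, with the unchanged surrounding lines inferred from context:

        int radeon_gem_prime_pin(struct drm_gem_object *obj)
        {
                struct radeon_bo *bo = gem_to_radeon_bo(obj);
                int ret = 0;

                ret = radeon_bo_reserve(bo, false);
                if (unlikely(ret != 0))
                        return ret;

                /* pin buffer into GTT */
                ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
                if (likely(ret == 0))
                        bo->prime_shared_count++;

                radeon_bo_unreserve(bo);
                return ret;
        }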