struct amdgpu_bo *bo[2] = {NULL, NULL};
struct amdgpu_bo_va *bo_va;
bool same_hive = false;
+ struct drm_exec exec;
int i, ret;
if (!va) {
goto unwind;
}
- /* Add BO to VM internal data structures */
- ret = amdgpu_bo_reserve(bo[i], false);
- if (ret) {
- pr_debug("Unable to reserve BO during memory attach");
- goto unwind;
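+ /* Lock the VM root PD and the BO before adding it to the VM */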
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ ret = amdgpu_vm_lock_pd(vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret)) {
+ drm_exec_fini(&exec);
+ goto unwind;
+ }
+
+ ret = drm_exec_lock_obj(&exec, &bo[i]->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret)) {
+ drm_exec_fini(&exec);
+ goto unwind;
+ }
}
+
bo_va = amdgpu_vm_bo_find(vm, bo[i]);
if (!bo_va)
bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
else
++bo_va->ref_count;
attachment[i]->bo_va = bo_va;
- amdgpu_bo_unreserve(bo[i]);
+ drm_exec_fini(&exec);
if (unlikely(!attachment[i]->bo_va)) {
ret = -ENOMEM;
pr_err("Failed to add BO object to VM. ret == %d\n",
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
struct mm_struct *mm;
+ struct drm_exec exec;
int r;
mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
!amdgpu_vm_is_bo_always_valid(vm, abo))
return -EPERM;
- r = amdgpu_bo_reserve(abo, false);
- if (r)
- return r;
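+ /* Lock the BO and the VM root PD before looking up or creating the bo_va */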
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_obj(&exec, &abo->tbo.base, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto out_unlock;
+
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto out_unlock;
+ }
amdgpu_vm_bo_update_shared(abo);
bo_va = amdgpu_vm_bo_find(vm, abo);
- amdgpu_bo_unreserve(abo);
- return r;
+ goto out_unlock;
}
-
- amdgpu_bo_unreserve(abo);
+ drm_exec_fini(&exec);
+
/* Validate and add eviction fence to DMABuf imports with dynamic
* attachment in compute VMs. Re-validation will be done by
}
}
mutex_unlock(&vm->process_info->lock);
+ return r;
+out_unlock:
+ drm_exec_fini(&exec);
return r;
}
{
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv;
+ struct drm_exec exec;
int r, pasid;
/* Ensure IB tests are run on ring */
if (r)
goto error_pasid;
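+ /* amdgpu_vm_bo_add() expects the VM root PD to be locked */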
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r)) {
+ drm_exec_fini(&exec);
+ goto error_vm;
+ }
+ }
+
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
+ drm_exec_fini(&exec);
if (!fpriv->prt_va) {
r = -ENOMEM;
goto error_vm;
{
struct amdgpu_bo_va *bo_va;
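+
+ /* The caller must hold the VM lock when adding a bo_va */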
+ amdgpu_vm_assert_locked(vm);
+
bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
if (bo_va == NULL) {
return NULL;