long ret;
/* Wait on any implicit rendering fences */
- ret = dma_resv_wait_timeout_rcu(resv, write, true,
- MAX_SCHEDULE_TIMEOUT);
+ ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
EXPORT_SYMBOL(dma_resv_copy_fences);
/**
- * dma_resv_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without the update side lock held
* @obj: the reservation object
* @pfence_excl: the returned exclusive fence (or NULL)
 * exclusive fence is not specified, the fence is put into the array of the
* shared fences as well. Returns either zero or -ENOMEM.
*/
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
- struct dma_fence **pfence_excl,
- unsigned int *pshared_count,
- struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+ unsigned int *pshared_count,
+ struct dma_fence ***pshared)
{
struct dma_fence **shared = NULL;
struct dma_fence *fence_excl;
*pshared = shared;
return ret;
}
-EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences);
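For reference, a minimal usage sketch of the renamed helper under the contract the kernel-doc above describes; the function example_collect_fences and its error handling are illustrative assumptions, not part of the patch:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

/* Hypothetical caller: snapshot the fences without holding
 * dma_resv_lock(), then drop every reference the helper handed out.
 */
static int example_collect_fences(struct dma_resv *obj)
{
	struct dma_fence *excl, **shared;
	unsigned int count, i;
	int ret;

	ret = dma_resv_get_fences(obj, &excl, &count, &shared);
	if (ret)
		return ret;	/* only failure mode is -ENOMEM */

	/* ... inspect excl and shared[0..count - 1] here ... */

	for (i = 0; i < count; i++)
		dma_fence_put(shared[i]);
	kfree(shared);
	dma_fence_put(excl);	/* NULL-safe; excl may be NULL */
	return 0;
}

The caller owns the returned references and the shared array, which is why the driver hunks below all end in dma_fence_put() loops and kfree().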
/**
- * dma_resv_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout - Wait on a reservation object's
* shared and/or exclusive fences.
* @obj: the reservation object
* @wait_all: if true, wait on all fences, else wait on just exclusive fence
* @intr: if true, do interruptible wait
* @timeout: timeout value in jiffies or zero to return immediately
*
+ * Callers are not required to hold specific locks, but may hold
+ * dma_resv_lock() already.
+ *
* RETURNS
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
*/
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
- bool wait_all, bool intr,
- unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+ unsigned long timeout)
{
long ret = timeout ? timeout : 1;
unsigned int seq, shared_count;
rcu_read_unlock();
goto retry;
}
-EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
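A hedged sketch of how a caller folds the tristate return value into an errno, as the driver hunks below do; example_wait_idle and the 10 second timeout are illustrative only:

#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical caller: interruptible wait on all fences with a
 * 10 second timeout.
 */
static int example_wait_idle(struct dma_resv *obj)
{
	long ret;

	ret = dma_resv_wait_timeout(obj, true /* wait_all */,
				    true /* intr */, 10 * HZ);
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */
	if (ret == 0)
		return -ETIMEDOUT;	/* wait timed out */
	return 0;			/* positive: all fences signaled */
}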
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
}
/**
- * dma_resv_test_signaled_rcu - Test if a reservation object's
- * fences have been signaled.
+ * dma_resv_test_signaled - Test if a reservation object's fences have been
+ * signaled.
* @obj: the reservation object
* @test_all: if true, test all fences, otherwise only test the exclusive
* fence
*
+ * Callers are not required to hold specific locks, but may hold
+ * dma_resv_lock() already.
+ *
* RETURNS
* true if all fences signaled, else false
*/
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
unsigned int seq, shared_count;
int ret;
rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
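And a sketch of the non-blocking check that the NOSYNC/NOWAIT call sites below follow; example_try_access is an illustrative name, not from the patch:

#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical caller: poll instead of block. A writer must see all
 * fences signaled (test_all == true); a reader only needs the
 * exclusive fence, so "write" doubles as the test_all argument.
 */
static int example_try_access(struct dma_resv *obj, bool write)
{
	if (!dma_resv_test_signaled(obj, write))
		return -EBUSY;
	return 0;
}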
#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
goto unpin;
}
- r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
- &work->shared_count,
- &work->shared);
+ r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
+ &work->shared_count, &work->shared);
if (unlikely(r != 0)) {
DRM_ERROR("failed to get fences for buffer\n");
goto unpin;
if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
return 0;
- r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
+ r = dma_resv_get_fences(obj, NULL, &count, &fences);
if (r)
return r;
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
- timeout);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
unsigned count;
int r;
- r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
+ r = dma_resv_get_fences(resv, NULL, &count, &fences);
if (r)
goto fallback;
/* Not enough memory for the delayed delete, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout_rcu(resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
mmu_interval_set_seq(mni, cur_seq);
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
mutex_unlock(&adev->notifier_lock);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
return 0;
}
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false,
- msecs_to_jiffies(10));
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ msecs_to_jiffies(10));
if (r == 0)
r = -ETIMEDOUT;
if (r < 0)
unsigned i, shared_count;
int r;
- r = dma_resv_get_fences_rcu(resv, &excl,
- &shared_count, &shared);
+ r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
if (r) {
/* Not enough memory to grab the fence list, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout_rcu(resv, true, false,
+ dma_resv_wait_timeout(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
return;
}
return true;
/* Don't evict VM page tables while they are busy */
- if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+ if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
return false;
/* Try to block ongoing updates */
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
- true, true, timeout);
+ timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+ true, timeout);
if (timeout <= 0)
return timeout;
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
- r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
- false,
- msecs_to_jiffies(5000));
+ r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+ msecs_to_jiffies(5000));
if (unlikely(r <= 0))
DRM_ERROR("Waiting for fences timed out!");
return -EINVAL;
}
- ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
- true, timeout);
+ ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
if (ret == 0)
ret = -ETIME;
else if (ret > 0)
return drm_gem_fence_array_add(fence_array, fence);
}
- ret = dma_resv_get_fences_rcu(obj->resv, NULL,
+ ret = dma_resv_get_fences(obj->resv, NULL,
&fence_count, &fences);
if (ret || !fence_count)
return ret;
}
if (op & ETNA_PREP_NOSYNC) {
- if (!dma_resv_test_signaled_rcu(obj->resv,
- write))
+ if (!dma_resv_test_signaled(obj->resv, write))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = dma_resv_wait_timeout_rcu(obj->resv,
- write, true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
}
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
- ret = dma_resv_get_fences_rcu(robj, &bo->excl,
- &bo->nr_shared,
- &bo->shared);
+ ret = dma_resv_get_fences(robj, &bo->excl,
+ &bo->nr_shared,
+ &bo->shared);
if (ret)
return ret;
} else {
void dma_resv_prune(struct dma_resv *resv)
{
if (dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled(resv, true))
dma_resv_add_excl_fence(resv, NULL);
dma_resv_unlock(resv);
}
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !dma_resv_test_signaled_rcu(obj->resv, true);
+ * !dma_resv_test_signaled(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
if (DBG_FORCE_RELOC)
return false;
- return !dma_resv_test_signaled_rcu(vma->resv, true);
+ return !dma_resv_test_signaled(vma->resv, true);
}
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
return true;
/* we will unbind on next submission, still have userptr pins */
- r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(obj->base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+ ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+ &shared);
if (ret)
return ret;
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+ &shared);
if (ret)
return ret;
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+ ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = dma_resv_wait_timeout_rcu(obj->resv, write,
- true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
else if (ret < 0)
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
- no_wait ? 0 : 30 * HZ);
+ lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+ no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
else if (lret > 0)
if (!gem_obj)
return -ENOENT;
- ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
- true, timeout);
+ ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
if (!r)
r = -EBUSY;
}
robj = gem_to_radeon_bo(gobj);
- r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+ r = dma_resv_test_signaled(robj->tbo.base.resv, true);
if (r == 0)
r = -EBUSY;
else
}
robj = gem_to_radeon_bo(gobj);
- ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
return true;
}
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
struct dma_resv *resv = &bo->base._resv;
int ret;
- if (dma_resv_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled(resv, true))
ret = 0;
else
ret = -EBUSY;
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
- lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
- 30 * HZ);
+ lret = dma_resv_wait_timeout(resv, true, interruptible,
+ 30 * HZ);
if (lret < 0)
return lret;
/* Last resort, if we fail to allocate memory for the
* fences block for the BO to become idle
*/
- dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
- 30 * HZ);
+ dma_resv_wait_timeout(bo->base.resv, true, false,
+ 30 * HZ);
}
if (bo->bdev->funcs->release_notify)
ttm_mem_io_free(bdev, bo->resource);
}
- if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+ if (!dma_resv_test_signaled(bo->base.resv, true) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
long timeout = 15 * HZ;
if (no_wait) {
- if (dma_resv_test_signaled_rcu(bo->base.resv, true))
+ if (dma_resv_test_signaled(bo->base.resv, true))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
- interruptible, timeout);
+ timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
+ timeout);
if (timeout < 0)
return timeout;
/* Check for a conflicting fence */
resv = obj->resv;
- if (!dma_resv_test_signaled_rcu(resv,
- arg->flags & VGEM_FENCE_WRITE)) {
+ if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {
ret = -EBUSY;
goto err_fence;
}
return -ENOENT;
if (args->flags & VIRTGPU_WAIT_NOWAIT) {
- ret = dma_resv_test_signaled_rcu(obj->resv, true);
+ ret = dma_resv_test_signaled(obj->resv, true);
} else {
- ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
- timeout);
+ ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
}
if (ret == 0)
ret = -EBUSY;
if (flags & drm_vmw_synccpu_allow_cs) {
long lret;
- lret = dma_resv_wait_timeout_rcu
- (bo->base.resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ lret = dma_resv_wait_timeout(bo->base.resv, true, true,
+ nonblock ? 0 :
+ MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared);
-
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+ unsigned *pshared_count, struct dma_fence ***pshared);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
- unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+ unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
#endif /* _LINUX_RESERVATION_H */