if (rdev->asic->copy.dma) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_DMA, n,
- dobj->tbo.resv);
+ dobj->tbo.base.resv);
if (time < 0)
goto out_cleanup;
if (time > 0)
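Both benchmark hunks are in radeon_benchmark_move() (radeon_benchmark.c): the reservation object passed to radeon_benchmark_do_move() now comes from the buffer's embedded GEM object (dobj->tbo.base.resv) instead of TTM's old per-BO pointer. The DMA path above gets the change first; the blit path below is the same one-line substitution.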
if (rdev->asic->copy.blit) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_BLIT, n,
- dobj->tbo.resv);
+ dobj->tbo.base.resv);
if (time < 0)
goto out_cleanup;
if (time > 0)
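From here on the patch repeats that substitution at every radeon user of tbo.resv. Next is radeon_cs_sync_rings() (radeon_cs.c), which pulls the reservation object of each validated relocation into the command submission's sync bookkeeping.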
list_for_each_entry(reloc, &p->validated, tv.head) {
struct reservation_object *resv;
- resv = reloc->robj->tbo.resv;
+ resv = reloc->robj->tbo.base.resv;
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
reloc->tv.num_shared);
if (r)
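radeon_sync_resv() collects the fences stored in the reservation object into the IB's sync object so the ring waits for the BO's previous users; only where the resv pointer is read from changes. The page-flip worker radeon_flip_work_func() (radeon_display.c) is next.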
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.base.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
}
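The flip worker takes a reference on the new scanout buffer's exclusive fence so the flip is not programmed before rendering into the buffer has finished; the fence is now looked up through the embedded GEM object. Three radeon_gem.c hunks follow, starting with the set_domain path.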
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+ r = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
if (!r)
r = -EBUSY;
}
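For a CPU-domain request the driver just waits, up to 30 seconds, for every fence on the BO; reservation_object_wait_timeout_rcu() returning 0 means the timeout expired, hence -EBUSY. radeon_gem_busy_ioctl() is next.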
robj = gem_to_radeon_bo(gobj);
- r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+ r = reservation_object_test_signaled_rcu(robj->tbo.base.resv, true);
if (r == 0)
r = -EBUSY;
else
}
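reservation_object_test_signaled_rcu() is the non-blocking variant: it reports whether the relevant fences have already signaled, so a false result becomes -EBUSY and the elided else branch clears r to 0. The last radeon_gem.c hunk is radeon_gem_wait_idle_ioctl().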
robj = gem_to_radeon_bo(gobj);
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+ ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
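The wait-idle ioctl performs the same 30 second interruptible wait. As a sketch of the return-value convention these call sites decode (illustrative only, not part of the patch; the wrapper name radeon_wait_bo_idle() is invented):

#include <linux/reservation.h>	/* reservation_object_wait_timeout_rcu() */
#include "radeon.h"		/* struct radeon_bo, in kernel context */

/* Illustrative sketch, not from the patch: the wait returns a
 * negative error, 0 on timeout, or the remaining jiffies on success.
 */
static int radeon_wait_bo_idle(struct radeon_bo *robj)
{
	long ret;

	ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv,
						  true,	/* wait_all */
						  true,	/* interruptible */
						  30 * HZ);
	if (ret == 0)
		return -EBUSY;	/* timed out, BO still busy */
	if (ret < 0)
		return ret;	/* interrupted or other error */
	return 0;		/* all fences signaled */
}

The MMU notifier in radeon_mn.c follows.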
continue;
}
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, acc_size,
sg, resv, &radeon_ttm_bo_destroy);
- bo->tbo.base.resv = bo->tbo.resv;
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
return r;
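This is a pure deletion: radeon_bo_create() used to alias the embedded GEM object's resv pointer to TTM's by hand, and that becomes unnecessary once the GEM reservation object is the authoritative one for the TTM BO itself. A much-abridged sketch of the layout this conversion relies on (field subset only, not the full kernel definitions):

/* Abridged sketch, not the complete kernel definitions: the TTM BO
 * embeds a GEM object, and the GEM object carries the reservation
 * object, so bo->tbo.base.resv is the single source of truth.
 */
struct drm_gem_object {
	/* ... */
	struct reservation_object *resv;	/* points at _resv by default */
	struct reservation_object _resv;
};

struct ttm_buffer_object {
	struct drm_gem_object base;		/* embedded GEM object */
	/* ... the TTM-private resv pointer is retired by this series */
};

The next three hunks convert the lockdep annotations in the tiling helpers.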
int steal;
int i;
- reservation_object_assert_held(bo->tbo.resv);
+ reservation_object_assert_held(bo->tbo.base.resv);
if (!bo->tiling_flags)
return 0;
uint32_t *tiling_flags,
uint32_t *pitch)
{
- reservation_object_assert_held(bo->tbo.resv);
+ reservation_object_assert_held(bo->tbo.base.resv);
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
bool force_drop)
{
if (!force_drop)
- reservation_object_assert_held(bo->tbo.resv);
+ reservation_object_assert_held(bo->tbo.base.resv);
if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
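radeon_bo_get_surface_reg(), radeon_bo_get_tiling_flags() and radeon_bo_check_tiling() only assert that the caller holds the BO's reservation lock; the assertion moves to the embedded resv along with everything else. radeon_bo_fence() is next.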
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
bool shared)
{
- struct reservation_object *resv = bo->tbo.resv;
+ struct reservation_object *resv = bo->tbo.base.resv;
if (shared)
reservation_object_add_shared_fence(resv, &fence->base);
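radeon_bo_fence() publishes a finished job's fence on the BO, as a shared fence here or, in the elided else branch, as the exclusive fence via reservation_object_add_excl_fence(). The two radeon_test.c hunks below are the GTT/VRAM copy tests.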
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
else
fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
if (IS_ERR(fence)) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
r = PTR_ERR(fence);
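That was the GTT to VRAM direction of radeon_do_test_moves() (radeon_test.c); the VRAM to GTT direction below is symmetric.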
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
else
fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
if (IS_ERR(fence)) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
r = PTR_ERR(fence);
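Both test copies choose the engine by ring type and pass the VRAM object's resv so the copy is ordered against earlier work on the buffer. Note the shape of the next hunk, radeon_move_blit() (radeon_ttm.c): there bo is the plain struct ttm_buffer_object, so the expression is bo->base.resv with no tbo in the path.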
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
- fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
+ fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
if (IS_ERR(fence))
return PTR_ERR(fence);
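radeon_move_blit() sits on TTM's buffer-move path, which is why it sees the raw TTM BO rather than a radeon_bo. The UVD command parser follows.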
return -EINVAL;
}
- f = reservation_object_get_excl(bo->tbo.resv);
+ f = reservation_object_get_excl(bo->tbo.base.resv);
if (f) {
r = radeon_fence_wait((struct radeon_fence *)f, false);
if (r) {
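The UVD parser (radeon_uvd.c) reads the exclusive fence directly and waits on it; reservation_object_get_excl() is only safe with the reservation lock held, which the command-stream path has already taken while validating the BO. The final two hunks are the VM page-table updaters in radeon_vm.c.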
if (ib.length_dw != 0) {
radeon_asic_vm_pad_ib(rdev, &ib);
- radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
+ radeon_sync_resv(rdev, &ib.sync, pd->tbo.base.resv, true);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
uint64_t pte;
int r;
- radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
- r = reservation_object_reserve_shared(pt->tbo.resv, 1);
+ radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
+ r = reservation_object_reserve_shared(pt->tbo.base.resv, 1);
if (r)
return r;
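radeon_vm_update_page_directory() and radeon_vm_update_ptes() both sync the IB against the fences already stored on the page-directory and page-table BOs, and the PTE path also reserves a shared-fence slot up front. Condensed into one sketch (illustrative only, error handling trimmed, names taken from the hunks above):

/* Illustrative only: the shared-fence discipline around a VM page
 * table update, condensed from the hunks above.
 */
radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);	/* wait for prior users */
r = reservation_object_reserve_shared(pt->tbo.base.resv, 1);	/* slot for our fence */
if (r)
	return r;
/* ... write the PTE updates into the IB and schedule it ... */
radeon_bo_fence(pt, ib->fence, true);	/* publish the new fence as shared */

With these, every radeon reference to the TTM-private resv pointer is gone, and the embedded GEM reservation object is the single place the BO lock and its fences live.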