drm/amdgpu: wait on page directory changes. v2
author Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Fri, 14 Aug 2015 18:08:40 +0000 (20:08 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:51:23 +0000 (16:51 -0400)
Page tables can be moved, so the page directory update can be necessary for
the current CS even if none of the CS's BOs are moved. In that scenario
there is no fence between the sdma0 and gfx rings, so we add one.
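
The synchronization pattern this adds, as a minimal user-space sketch: the VM
keeps a reference to the fence of its last page directory update, and every
command submission syncs against that fence before it runs, even when none of
its own BOs moved. The names below (struct vm, vm_update_page_directory(),
cs_submit()) and the toy refcounted fence are hypothetical stand-ins, not the
amdgpu API.

#include <stdio.h>
#include <stdlib.h>

struct fence {
        int refcount;
        unsigned int seq;       /* which PD update this fence signals */
};

static struct fence *fence_get(struct fence *f)
{
        if (f)
                f->refcount++;
        return f;
}

static void fence_put(struct fence *f)
{
        if (f && --f->refcount == 0)
                free(f);
}

struct vm {
        struct fence *page_directory_fence;     /* last PD update, may be NULL */
};

/* Rewrite the page directory and remember the fence of that update. */
static void vm_update_page_directory(struct vm *vm, unsigned int seq)
{
        struct fence *fence = malloc(sizeof(*fence));

        fence->refcount = 1;
        fence->seq = seq;

        fence_put(vm->page_directory_fence);
        vm->page_directory_fence = fence_get(fence);
        fence_put(fence);
}

/* Every CS waits for the last PD update, whether or not its BOs moved. */
static void cs_submit(struct vm *vm, const char *job)
{
        if (vm->page_directory_fence)
                printf("%s waits for PD update #%u\n",
                       job, vm->page_directory_fence->seq);
        else
                printf("%s has no PD update to wait for\n", job);
}

int main(void)
{
        struct vm vm = { .page_directory_fence = NULL };

        cs_submit(&vm, "cs0");                  /* nothing to wait for yet */
        vm_update_page_directory(&vm, 1);
        cs_submit(&vm, "cs1");                  /* waits even without BO moves */

        fence_put(vm.page_directory_fence);     /* as amdgpu_vm_fini() does */
        return 0;
}

Balancing fence_get()/fence_put() around the stored pointer mirrors what the
amdgpu_vm_update_page_directory() and amdgpu_vm_fini() hunks below do with
vm->page_directory_fence.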

v2 (chk): rebased

Signed-off-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 68beb40..2fc58e6 100644
@@ -982,6 +982,7 @@ struct amdgpu_vm {
        /* contains the page directory */
        struct amdgpu_bo        *page_directory;
        unsigned                max_pde_used;
+       struct fence            *page_directory_fence;
 
        /* array of page tables, one for each page directory entry */
        struct amdgpu_vm_pt     *page_tables;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 11edac7..e4424b4 100644
@@ -551,6 +551,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
        if (r)
                return r;
 
+       r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+       if (r)
+               return r;
+
        r = amdgpu_vm_clear_freed(adev, vm);
        if (r)
                return r;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2fc909f..a78a206 100644
@@ -495,7 +495,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                                         &fence);
                if (r)
                        goto error_free;
+
                amdgpu_bo_fence(pd, fence, true);
+               fence_put(vm->page_directory_fence);
+               vm->page_directory_fence = fence_get(fence);
                fence_put(fence);
        }
 
@@ -1291,6 +1294,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                return -ENOMEM;
        }
 
+       vm->page_directory_fence = NULL;
+
        r = amdgpu_bo_create(adev, pd_size, align, true,
                             AMDGPU_GEM_DOMAIN_VRAM, 0,
                             NULL, &vm->page_directory);
@@ -1339,6 +1344,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        kfree(vm->page_tables);
 
        amdgpu_bo_unref(&vm->page_directory);
+       fence_put(vm->page_directory_fence);
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                amdgpu_fence_unref(&vm->ids[i].flushed_updates);